Repository: hive
Updated Branches:
   refs/heads/java8 2a455f4ae -> 2695a6356 (forced update)


HIVE-4924: JDBC: Support query timeout for jdbc (Vaibhav Gumashta reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b6218275
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b6218275
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b6218275

Branch: refs/heads/java8
Commit: b6218275b00b64aed7efaf470784cc0441464f67
Parents: 0a5bc94
Author: Vaibhav Gumashta <vgumashta@hortonworks.com>
Authored: Tue May 3 12:49:22 2016 -0700
Committer: Vaibhav Gumashta <vgumashta@hortonworks.com>
Committed: Tue May 3 12:49:22 2016 -0700

----------------------------------------------------------------------
  .../org/apache/hive/jdbc/TestJdbcDriver2.java | 43 +++++++-
  .../cli/session/TestHiveSessionImpl.java | 2 +-
  .../org/apache/hive/jdbc/HiveStatement.java | 20 ++--
  .../java/org/apache/hadoop/hive/ql/Driver.java | 27 +++--
  .../hadoop/hive/ql/history/HiveHistoryImpl.java | 8 +-
  .../hadoop/hive/ql/session/OperationLog.java | 8 +-
  service-rpc/if/TCLIService.thrift | 6 +
  .../gen/thrift/gen-cpp/TCLIService_types.cpp | 30 ++++-
  .../src/gen/thrift/gen-cpp/TCLIService_types.h | 15 ++-
  .../rpc/thrift/TExecuteStatementReq.java | 109 ++++++++++++++++++-
  .../service/rpc/thrift/TOperationState.java | 5 +-
  service-rpc/src/gen/thrift/gen-php/Types.php | 25 +++++
  .../src/gen/thrift/gen-py/TCLIService/ttypes.py | 18 ++-
  .../gen/thrift/gen-rb/t_c_l_i_service_types.rb | 9 +-
  .../org/apache/hive/service/cli/CLIService.java | 46 ++++++--
  .../service/cli/EmbeddedCLIServiceClient.java | 19 ++--
  .../apache/hive/service/cli/ICLIService.java | 16 +--
  .../apache/hive/service/cli/OperationState.java | 7 +-
  .../operation/ExecuteStatementOperation.java | 9 +-
  .../cli/operation/HiveCommandOperation.java | 5 +
  .../cli/operation/MetadataOperation.java | 7 +-
  .../hive/service/cli/operation/Operation.java | 17 +--
  .../service/cli/operation/OperationManager.java | 27 +++--
  .../service/cli/operation/SQLOperation.java | 106 +++++++++++++-----
  .../hive/service/cli/session/HiveSession.java | 28 ++++-
  .../service/cli/session/HiveSessionImpl.java | 38 ++++---
  .../thrift/RetryingThriftCLIServiceClient.java | 22 +++-
  .../service/cli/thrift/ThriftCLIService.java | 16 +--
  .../cli/thrift/ThriftCLIServiceClient.java | 32 +++---
  .../cli/thrift/ThriftCLIServiceTest.java | 6 +-
  .../thrift/ThriftCliServiceTestWithCookie.java | 3 +-
  31 files changed, 557 insertions(+), 172 deletions(-)
----------------------------------------------------------------------
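
For context, a minimal client-side sketch (not part of this commit) of how the new timeout support is exercised over JDBC, mirroring the testQueryTimeout test added below. It assumes a java.sql.Connection "con" obtained through the Hive JDBC driver against HiveServer2; the table name is hypothetical.

  // Minimal sketch, assuming an open Connection con to HiveServer2
  static void runWithTimeout(java.sql.Connection con) throws java.sql.SQLException {
    java.sql.Statement stmt = con.createStatement();
    stmt.setQueryTimeout(15);                 // seconds; 0 still means "no limit"
    try {
      // hypothetical long-running query; any slow statement will do
      stmt.executeQuery("select count(*) from some_table");
    } catch (java.sql.SQLTimeoutException e) {
      // HiveStatement surfaces the server-side TIMEDOUT_STATE as SQLTimeoutException
    } finally {
      stmt.close();
    }
  }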


http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 73bc620..7243648 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -55,6 +55,7 @@ import java.sql.PreparedStatement;
  import java.sql.ResultSet;
  import java.sql.ResultSetMetaData;
  import java.sql.SQLException;
+import java.sql.SQLTimeoutException;
  import java.sql.SQLWarning;
  import java.sql.Statement;
  import java.sql.Timestamp;
@@ -2384,7 +2385,7 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
          try {
            System.out.println("Executing query: ");
            stmt.executeQuery("select sleepUDF(t1.under_col) as u0, t1.under_col as u1, " +
- "t2.under_col as u2 from " + tableName + "t1 join " + tableName +
+ "t2.under_col as u2 from " + tableName + " t1 join " + tableName +
                " t2 on t1.under_col = t2.under_col");
            fail("Expecting SQLException");
          } catch (SQLException e) {
@@ -2399,7 +2400,7 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
        @Override
        public void run() {
          try {
- Thread.sleep(1000);
+ Thread.sleep(10000);
            System.out.println("Cancelling query: ");
            stmt.cancel();
          } catch (Exception e) {
@@ -2414,6 +2415,44 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
      stmt.close();
    }

+ @Test
+ public void testQueryTimeout() throws Exception {
+ String udfName = SleepUDF.class.getName();
+ Statement stmt1 = con.createStatement();
+ stmt1.execute("create temporary function sleepUDF as '" + udfName + "'");
+ stmt1.close();
+ Statement stmt = con.createStatement();
+ // Test a query where timeout kicks in
+ // Set query timeout to 15 seconds
+ stmt.setQueryTimeout(15);
+ System.err.println("Executing query: ");
+ try {
+ // Sleep UDF sleeps for 100ms for each select call
+ // The test table has 500 rows, so that should be sufficient time
+ stmt.executeQuery("select sleepUDF(t1.under_col) as u0, t1.under_col as u1, "
+ + "t2.under_col as u2 from " + tableName + " t1 join " + tableName
+ + " t2 on t1.under_col = t2.under_col");
+ fail("Expecting SQLTimeoutException");
+ } catch (SQLTimeoutException e) {
+ assertNotNull(e);
+ System.err.println(e.toString());
+ } catch (SQLException e) {
+ fail("Expecting SQLTimeoutException, but got SQLException: " + e);
+ e.printStackTrace();
+ }
+
+ // Test a query where timeout does not kick in. Set it to 25s
+ stmt.setQueryTimeout(25);
+ try {
+ stmt.executeQuery("show tables");
+ } catch (SQLException e) {
+ fail("Unexpected SQLException: " + e);
+ e.printStackTrace();
+ }
+
+ stmt.close();
+ }
+
    /**
     * Test the non-null value of the Yarn ATS GUID.
     * We spawn 2 threads - one running the query and

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestHiveSessionImpl.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestHiveSessionImpl.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestHiveSessionImpl.java
index 4d763d2..c9e6a13 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestHiveSessionImpl.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestHiveSessionImpl.java
@@ -70,7 +70,7 @@ public class TestHiveSessionImpl {
      Map<String, String> confOverlay = new HashMap<String, String>();
      String hql = "drop table if exists table_not_exists";
      Mockito.when(operationManager.newExecuteStatementOperation(same(session), eq(hql),
- (Map<String, String>)Mockito.any(), eq(true))).thenReturn(operation);
+ (Map<String, String>)Mockito.any(), eq(true), eq(0))).thenReturn(operation);

      try {


http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
index 3cc6b74..38ccc78 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
@@ -43,6 +43,7 @@ import java.sql.Connection;
  import java.sql.ResultSet;
  import java.sql.SQLException;
  import java.sql.SQLFeatureNotSupportedException;
+import java.sql.SQLTimeoutException;
  import java.sql.SQLWarning;
  import java.util.ArrayList;
  import java.util.HashMap;
@@ -111,6 +112,8 @@ public class HiveStatement implements java.sql.Statement {
     */
    private boolean isExecuteStatementFailed = false;

+ private int queryTimeout = 0;
+
    public HiveStatement(HiveConnection connection, TCLIService.Iface client,
        TSessionHandle sessHandle) {
      this(connection, client, sessHandle, false, DEFAULT_FETCH_SIZE);
@@ -300,7 +303,7 @@ public class HiveStatement implements java.sql.Statement {
       */
      execReq.setRunAsync(true);
      execReq.setConfOverlay(sessConf);
-
+ execReq.setQueryTimeout(queryTimeout);
      try {
        TExecuteStatementResp execResp = client.ExecuteStatement(execReq);
        Utils.verifySuccessWithInfo(execResp.getStatus());
@@ -323,8 +326,8 @@ public class HiveStatement implements java.sql.Statement {
      while (!isOperationComplete) {
        try {
          /**
- * For an async SQLOperation, GetOperationStatus will use the long polling approach
- * It will essentially return after the HIVE_SERVER2_LONG_POLLING_TIMEOUT (a server config) expires
+ * For an async SQLOperation, GetOperationStatus will use the long polling approach It will
+ * essentially return after the HIVE_SERVER2_LONG_POLLING_TIMEOUT (a server config) expires
           */
          statusResp = client.GetOperationStatus(statusReq);
          Utils.verifySuccessWithInfo(statusResp.getStatus());
@@ -338,10 +341,12 @@ public class HiveStatement implements java.sql.Statement {
            case CANCELED_STATE:
              // 01000 -> warning
              throw new SQLException("Query was cancelled", "01000");
+ case TIMEDOUT_STATE:
+ throw new SQLTimeoutException("Query timed out after " + queryTimeout + " seconds");
            case ERROR_STATE:
              // Get the error details from the underlying exception
- throw new SQLException(statusResp.getErrorMessage(),
- statusResp.getSqlState(), statusResp.getErrorCode());
+ throw new SQLException(statusResp.getErrorMessage(), statusResp.getSqlState(),
+ statusResp.getErrorCode());
            case UKNOWN_STATE:
              throw new SQLException("Unknown query", "HY000");
            case INITIALIZED_STATE:
@@ -787,10 +792,7 @@ public class HiveStatement implements java.sql.Statement {

    @Override
    public void setQueryTimeout(int seconds) throws SQLException {
- // 0 is supported which means "no limit"
- if (seconds != 0) {
- throw new SQLException("Query timeout seconds must be 0");
- }
+ this.queryTimeout = seconds;
    }

    /*

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 32d2cb2..6a610cb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -1725,20 +1725,31 @@ public class Driver implements CommandProcessor {
        }
        LOG.info("Completed executing command(queryId=" + queryId + "); Time taken: " + duration + " seconds");
      }
- plan.setDone();

- if (SessionState.get() != null) {
- try {
- SessionState.get().getHiveHistory().logPlanProgress(plan);
- } catch (Exception e) {
- // ignore
- }
+ releasePlan(plan);
+
+ if (console != null) {
+ console.printInfo("OK");
      }
- console.printInfo("OK");

      return (0);
    }

+ private synchronized void releasePlan(QueryPlan plan) {
+ // Plan maybe null if Driver.close is called in another thread for the same Driver object
+ if (plan != null) {
+ plan.setDone();
+ if (SessionState.get() != null) {
+ try {
+ SessionState.get().getHiveHistory().logPlanProgress(plan);
+ } catch (Exception e) {
+ // Log and ignore
+ LOG.warn("Could not log query plan progress", e);
+ }
+ }
+ }
+ }
+
    private void setQueryDisplays(List<Task<? extends Serializable>> tasks) {
      if (tasks != null) {
        for (Task<? extends Serializable> task : tasks) {

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java
index 0234fd9..6582cdd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java
@@ -315,9 +315,11 @@ public class HiveHistoryImpl implements HiveHistory{

    @Override
    public void logPlanProgress(QueryPlan plan) throws IOException {
- Map<String,String> ctrmap = ctrMapFactory.get();
- ctrmap.put("plan", plan.toString());
- log(RecordTypes.Counters, ctrmap);
+ if (plan != null) {
+ Map<String,String> ctrmap = ctrMapFactory.get();
+ ctrmap.put("plan", plan.toString());
+ log(RecordTypes.Counters, ctrmap);
+ }
    }

    @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/ql/src/java/org/apache/hadoop/hive/ql/session/OperationLog.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/OperationLog.java b/ql/src/java/org/apache/hadoop/hive/ql/session/OperationLog.java
index 6d0f14a..18216f2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/OperationLog.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/OperationLog.java
@@ -166,7 +166,7 @@ public class OperationLog {
        return readResults(maxRows);
      }

- void remove() {
+ synchronized void remove() {
        try {
          if (in != null) {
            in.close();
@@ -174,8 +174,10 @@ public class OperationLog {
          if (out != null) {
            out.close();
          }
- FileUtils.forceDelete(file);
- isRemoved = true;
+ if (!isRemoved) {
+ FileUtils.forceDelete(file);
+ isRemoved = true;
+ }
        } catch (Exception e) {
          LOG.error("Failed to remove corresponding log file of operation: " + operationName, e);
        }

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service-rpc/if/TCLIService.thrift
----------------------------------------------------------------------
diff --git a/service-rpc/if/TCLIService.thrift b/service-rpc/if/TCLIService.thrift
index 92bcf77..9879b1b 100644
--- a/service-rpc/if/TCLIService.thrift
+++ b/service-rpc/if/TCLIService.thrift
@@ -458,6 +458,9 @@ enum TOperationState {

    // The operation is in an pending state
    PENDING_STATE,
+
+ // The operation is in an timedout state
+ TIMEDOUT_STATE,
  }

  // A string identifier. This is interpreted literally.
@@ -697,6 +700,9 @@ struct TExecuteStatementReq {

    // Execute asynchronously when runAsync is true
    4: optional bool runAsync = false
+
+ // The number of seconds after which the query will timeout on the server
+ 5: optional i64 queryTimeout = 0
  }

  struct TExecuteStatementResp {

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
index 66f5e8c..5229230 100644
--- a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
+++ b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
@@ -109,7 +109,8 @@ int _kTOperationStateValues[] = {
    TOperationState::CLOSED_STATE,
    TOperationState::ERROR_STATE,
    TOperationState::UKNOWN_STATE,
- TOperationState::PENDING_STATE
+ TOperationState::PENDING_STATE,
+ TOperationState::TIMEDOUT_STATE
  };
  const char* _kTOperationStateNames[] = {
    "INITIALIZED_STATE",
@@ -119,9 +120,10 @@ const char* _kTOperationStateNames[] = {
    "CLOSED_STATE",
    "ERROR_STATE",
    "UKNOWN_STATE",
- "PENDING_STATE"
+ "PENDING_STATE",
+ "TIMEDOUT_STATE"
  };
-const std::map<int, const char*> _TOperationState_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(8, _kTOperationStateValues, _kTOperationStateNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+const std::map<int, const char*> _TOperationState_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(9, _kTOperationStateValues, _kTOperationStateNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));

  int _kTOperationTypeValues[] = {
    TOperationType::EXECUTE_STATEMENT,
@@ -5575,6 +5577,11 @@ void TExecuteStatementReq::__set_runAsync(const bool val) {
  __isset.runAsync = true;
  }

+void TExecuteStatementReq::__set_queryTimeout(const int64_t val) {
+ this->queryTimeout = val;
+__isset.queryTimeout = true;
+}
+
  uint32_t TExecuteStatementReq::read(::apache::thrift::protocol::TProtocol* iprot) {

    apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -5645,6 +5652,14 @@ uint32_t TExecuteStatementReq::read(::apache::thrift::protocol::TProtocol* iprot
            xfer += iprot->skip(ftype);
          }
          break;
+ case 5:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->queryTimeout);
+ this->__isset.queryTimeout = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
        default:
          xfer += iprot->skip(ftype);
          break;
@@ -5693,6 +5708,11 @@ uint32_t TExecuteStatementReq::write(::apache::thrift::protocol::TProtocol* opro
      xfer += oprot->writeBool(this->runAsync);
      xfer += oprot->writeFieldEnd();
    }
+ if (this->__isset.queryTimeout) {
+ xfer += oprot->writeFieldBegin("queryTimeout", ::apache::thrift::protocol::T_I64, 5);
+ xfer += oprot->writeI64(this->queryTimeout);
+ xfer += oprot->writeFieldEnd();
+ }
    xfer += oprot->writeFieldStop();
    xfer += oprot->writeStructEnd();
    return xfer;
@@ -5704,6 +5724,7 @@ void swap(TExecuteStatementReq &a, TExecuteStatementReq &b) {
    swap(a.statement, b.statement);
    swap(a.confOverlay, b.confOverlay);
    swap(a.runAsync, b.runAsync);
+ swap(a.queryTimeout, b.queryTimeout);
    swap(a.__isset, b.__isset);
  }

@@ -5712,6 +5733,7 @@ TExecuteStatementReq::TExecuteStatementReq(const TExecuteStatementReq& other222)
    statement = other222.statement;
    confOverlay = other222.confOverlay;
    runAsync = other222.runAsync;
+ queryTimeout = other222.queryTimeout;
    __isset = other222.__isset;
  }
  TExecuteStatementReq& TExecuteStatementReq::operator=(const TExecuteStatementReq& other223) {
@@ -5719,6 +5741,7 @@ TExecuteStatementReq& TExecuteStatementReq::operator=(const TExecuteStatementReq
    statement = other223.statement;
    confOverlay = other223.confOverlay;
    runAsync = other223.runAsync;
+ queryTimeout = other223.queryTimeout;
    __isset = other223.__isset;
    return *this;
  }
@@ -5729,6 +5752,7 @@ void TExecuteStatementReq::printTo(std::ostream& out) const {
    out << ", " << "statement=" << to_string(statement);
    out << ", " << "confOverlay="; (__isset.confOverlay ? (out << to_string(confOverlay)) : (out << "<null>"));
    out << ", " << "runAsync="; (__isset.runAsync ? (out << to_string(runAsync)) : (out << "<null>"));
+ out << ", " << "queryTimeout="; (__isset.queryTimeout ? (out << to_string(queryTimeout)) : (out << "<null>"));
    out << ")";
  }


http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h
index 9f937ca..838bf17 100644
--- a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h
+++ b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h
@@ -84,7 +84,8 @@ struct TOperationState {
      CLOSED_STATE = 4,
      ERROR_STATE = 5,
      UKNOWN_STATE = 6,
- PENDING_STATE = 7
+ PENDING_STATE = 7,
+ TIMEDOUT_STATE = 8
    };
  };

@@ -2501,9 +2502,10 @@ inline std::ostream& operator<<(std::ostream& out, const TGetInfoResp& obj)
  }

  typedef struct _TExecuteStatementReq__isset {
- _TExecuteStatementReq__isset() : confOverlay(false), runAsync(true) {}
+ _TExecuteStatementReq__isset() : confOverlay(false), runAsync(true), queryTimeout(true) {}
    bool confOverlay :1;
    bool runAsync :1;
+ bool queryTimeout :1;
  } _TExecuteStatementReq__isset;

  class TExecuteStatementReq {
@@ -2511,7 +2513,7 @@ class TExecuteStatementReq {

    TExecuteStatementReq(const TExecuteStatementReq&);
    TExecuteStatementReq& operator=(const TExecuteStatementReq&);
- TExecuteStatementReq() : statement(), runAsync(false) {
+ TExecuteStatementReq() : statement(), runAsync(false), queryTimeout(0LL) {
    }

    virtual ~TExecuteStatementReq() throw();
@@ -2519,6 +2521,7 @@ class TExecuteStatementReq {
    std::string statement;
    std::map<std::string, std::string> confOverlay;
    bool runAsync;
+ int64_t queryTimeout;

    _TExecuteStatementReq__isset __isset;

@@ -2530,6 +2533,8 @@ class TExecuteStatementReq {

    void __set_runAsync(const bool val);

+ void __set_queryTimeout(const int64_t val);
+
    bool operator == (const TExecuteStatementReq & rhs) const
    {
      if (!(sessionHandle == rhs.sessionHandle))
@@ -2544,6 +2549,10 @@ class TExecuteStatementReq {
        return false;
      else if (__isset.runAsync && !(runAsync == rhs.runAsync))
        return false;
+ if (__isset.queryTimeout != rhs.__isset.queryTimeout)
+ return false;
+ else if (__isset.queryTimeout && !(queryTimeout == rhs.queryTimeout))
+ return false;
      return true;
    }
    bool operator != (const TExecuteStatementReq &rhs) const {

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TExecuteStatementReq.java
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TExecuteStatementReq.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TExecuteStatementReq.java
index 2eb4d09..1f73cec 100644
--- a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TExecuteStatementReq.java
+++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TExecuteStatementReq.java
@@ -42,6 +42,7 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
    private static final org.apache.thrift.protocol.TField STATEMENT_FIELD_DESC = new org.apache.thrift.protocol.TField("statement", org.apache.thrift.protocol.TType.STRING, (short)2);
    private static final org.apache.thrift.protocol.TField CONF_OVERLAY_FIELD_DESC = new org.apache.thrift.protocol.TField("confOverlay", org.apache.thrift.protocol.TType.MAP, (short)3);
    private static final org.apache.thrift.protocol.TField RUN_ASYNC_FIELD_DESC = new org.apache.thrift.protocol.TField("runAsync", org.apache.thrift.protocol.TType.BOOL, (short)4);
+ private static final org.apache.thrift.protocol.TField QUERY_TIMEOUT_FIELD_DESC = new org.apache.thrift.protocol.TField("queryTimeout", org.apache.thrift.protocol.TType.I64, (short)5);

    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
    static {
@@ -53,13 +54,15 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
    private String statement; // required
    private Map<String,String> confOverlay; // optional
    private boolean runAsync; // optional
+ private long queryTimeout; // optional

    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
      SESSION_HANDLE((short)1, "sessionHandle"),
      STATEMENT((short)2, "statement"),
      CONF_OVERLAY((short)3, "confOverlay"),
- RUN_ASYNC((short)4, "runAsync");
+ RUN_ASYNC((short)4, "runAsync"),
+ QUERY_TIMEOUT((short)5, "queryTimeout");

      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();

@@ -82,6 +85,8 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
            return CONF_OVERLAY;
          case 4: // RUN_ASYNC
            return RUN_ASYNC;
+ case 5: // QUERY_TIMEOUT
+ return QUERY_TIMEOUT;
          default:
            return null;
        }
@@ -123,8 +128,9 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta

    // isset id assignments
    private static final int __RUNASYNC_ISSET_ID = 0;
+ private static final int __QUERYTIMEOUT_ISSET_ID = 1;
    private byte __isset_bitfield = 0;
- private static final _Fields optionals[] = {_Fields.CONF_OVERLAY,_Fields.RUN_ASYNC};
+ private static final _Fields optionals[] = {_Fields.CONF_OVERLAY,_Fields.RUN_ASYNC,_Fields.QUERY_TIMEOUT};
    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
    static {
      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -138,6 +144,8 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
      tmpMap.put(_Fields.RUN_ASYNC, new org.apache.thrift.meta_data.FieldMetaData("runAsync", org.apache.thrift.TFieldRequirementType.OPTIONAL,
          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+ tmpMap.put(_Fields.QUERY_TIMEOUT, new org.apache.thrift.meta_data.FieldMetaData("queryTimeout", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
      metaDataMap = Collections.unmodifiableMap(tmpMap);
      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TExecuteStatementReq.class, metaDataMap);
    }
@@ -145,6 +153,8 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
    public TExecuteStatementReq() {
      this.runAsync = false;

+ this.queryTimeout = 0L;
+
    }

    public TExecuteStatementReq(
@@ -172,6 +182,7 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
        this.confOverlay = __this__confOverlay;
      }
      this.runAsync = other.runAsync;
+ this.queryTimeout = other.queryTimeout;
    }

    public TExecuteStatementReq deepCopy() {
@@ -185,6 +196,8 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
      this.confOverlay = null;
      this.runAsync = false;

+ this.queryTimeout = 0L;
+
    }

    public TSessionHandle getSessionHandle() {
@@ -289,6 +302,28 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
      __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RUNASYNC_ISSET_ID, value);
    }

+ public long getQueryTimeout() {
+ return this.queryTimeout;
+ }
+
+ public void setQueryTimeout(long queryTimeout) {
+ this.queryTimeout = queryTimeout;
+ setQueryTimeoutIsSet(true);
+ }
+
+ public void unsetQueryTimeout() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __QUERYTIMEOUT_ISSET_ID);
+ }
+
+ /** Returns true if field queryTimeout is set (has been assigned a value) and false otherwise */
+ public boolean isSetQueryTimeout() {
+ return EncodingUtils.testBit(__isset_bitfield, __QUERYTIMEOUT_ISSET_ID);
+ }
+
+ public void setQueryTimeoutIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __QUERYTIMEOUT_ISSET_ID, value);
+ }
+
    public void setFieldValue(_Fields field, Object value) {
      switch (field) {
      case SESSION_HANDLE:
@@ -323,6 +358,14 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
        }
        break;

+ case QUERY_TIMEOUT:
+ if (value == null) {
+ unsetQueryTimeout();
+ } else {
+ setQueryTimeout((Long)value);
+ }
+ break;
+
      }
    }

@@ -340,6 +383,9 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
      case RUN_ASYNC:
        return isRunAsync();

+ case QUERY_TIMEOUT:
+ return getQueryTimeout();
+
      }
      throw new IllegalStateException();
    }
@@ -359,6 +405,8 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
        return isSetConfOverlay();
      case RUN_ASYNC:
        return isSetRunAsync();
+ case QUERY_TIMEOUT:
+ return isSetQueryTimeout();
      }
      throw new IllegalStateException();
    }
@@ -412,6 +460,15 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
          return false;
      }

+ boolean this_present_queryTimeout = true && this.isSetQueryTimeout();
+ boolean that_present_queryTimeout = true && that.isSetQueryTimeout();
+ if (this_present_queryTimeout || that_present_queryTimeout) {
+ if (!(this_present_queryTimeout && that_present_queryTimeout))
+ return false;
+ if (this.queryTimeout != that.queryTimeout)
+ return false;
+ }
+
      return true;
    }

@@ -439,6 +496,11 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
      if (present_runAsync)
        list.add(runAsync);

+ boolean present_queryTimeout = true && (isSetQueryTimeout());
+ list.add(present_queryTimeout);
+ if (present_queryTimeout)
+ list.add(queryTimeout);
+
      return list.hashCode();
    }

@@ -490,6 +552,16 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
          return lastComparison;
        }
      }
+ lastComparison = Boolean.valueOf(isSetQueryTimeout()).compareTo(other.isSetQueryTimeout());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetQueryTimeout()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.queryTimeout, other.queryTimeout);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
      return 0;
    }

@@ -541,6 +613,12 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
        sb.append(this.runAsync);
        first = false;
      }
+ if (isSetQueryTimeout()) {
+ if (!first) sb.append(", ");
+ sb.append("queryTimeout:");
+ sb.append(this.queryTimeout);
+ first = false;
+ }
      sb.append(")");
      return sb.toString();
    }
@@ -642,6 +720,14 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
              }
              break;
+ case 5: // QUERY_TIMEOUT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.queryTimeout = iprot.readI64();
+ struct.setQueryTimeoutIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
            default:
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
@@ -685,6 +771,11 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
          oprot.writeBool(struct.runAsync);
          oprot.writeFieldEnd();
        }
+ if (struct.isSetQueryTimeout()) {
+ oprot.writeFieldBegin(QUERY_TIMEOUT_FIELD_DESC);
+ oprot.writeI64(struct.queryTimeout);
+ oprot.writeFieldEnd();
+ }
        oprot.writeFieldStop();
        oprot.writeStructEnd();
      }
@@ -711,7 +802,10 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
        if (struct.isSetRunAsync()) {
          optionals.set(1);
        }
- oprot.writeBitSet(optionals, 2);
+ if (struct.isSetQueryTimeout()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
        if (struct.isSetConfOverlay()) {
          {
            oprot.writeI32(struct.confOverlay.size());
@@ -725,6 +819,9 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
        if (struct.isSetRunAsync()) {
          oprot.writeBool(struct.runAsync);
        }
+ if (struct.isSetQueryTimeout()) {
+ oprot.writeI64(struct.queryTimeout);
+ }
      }

      @Override
@@ -735,7 +832,7 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
        struct.setSessionHandleIsSet(true);
        struct.statement = iprot.readString();
        struct.setStatementIsSet(true);
- BitSet incoming = iprot.readBitSet(2);
+ BitSet incoming = iprot.readBitSet(3);
        if (incoming.get(0)) {
          {
            org.apache.thrift.protocol.TMap _map168 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
@@ -755,6 +852,10 @@ public class TExecuteStatementReq implements org.apache.thrift.TBase<TExecuteSta
          struct.runAsync = iprot.readBool();
          struct.setRunAsyncIsSet(true);
        }
+ if (incoming.get(2)) {
+ struct.queryTimeout = iprot.readI64();
+ struct.setQueryTimeoutIsSet(true);
+ }
      }
    }


http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TOperationState.java
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TOperationState.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TOperationState.java
index 3fa49b0..4390b4b 100644
--- a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TOperationState.java
+++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TOperationState.java
@@ -19,7 +19,8 @@ public enum TOperationState implements org.apache.thrift.TEnum {
    CLOSED_STATE(4),
    ERROR_STATE(5),
    UKNOWN_STATE(6),
- PENDING_STATE(7);
+ PENDING_STATE(7),
+ TIMEDOUT_STATE(8);

    private final int value;

@@ -56,6 +57,8 @@ public enum TOperationState implements org.apache.thrift.TEnum {
          return UKNOWN_STATE;
        case 7:
          return PENDING_STATE;
+ case 8:
+ return TIMEDOUT_STATE;
        default:
          return null;
      }

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service-rpc/src/gen/thrift/gen-php/Types.php
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-php/Types.php b/service-rpc/src/gen/thrift/gen-php/Types.php
index 7f1f99f..9ed7403 100644
--- a/service-rpc/src/gen/thrift/gen-php/Types.php
+++ b/service-rpc/src/gen/thrift/gen-php/Types.php
@@ -109,6 +109,7 @@ final class TOperationState {
    const ERROR_STATE = 5;
    const UKNOWN_STATE = 6;
    const PENDING_STATE = 7;
+ const TIMEDOUT_STATE = 8;
    static public $__names = array(
      0 => 'INITIALIZED_STATE',
      1 => 'RUNNING_STATE',
@@ -118,6 +119,7 @@ final class TOperationState {
      5 => 'ERROR_STATE',
      6 => 'UKNOWN_STATE',
      7 => 'PENDING_STATE',
+ 8 => 'TIMEDOUT_STATE',
    );
  }

@@ -5446,6 +5448,10 @@ class TExecuteStatementReq {
     * @var bool
     */
    public $runAsync = false;
+ /**
+ * @var int
+ */
+ public $queryTimeout = 0;

    public function __construct($vals=null) {
      if (!isset(self::$_TSPEC)) {
@@ -5475,6 +5481,10 @@ class TExecuteStatementReq {
            'var' => 'runAsync',
            'type' => TType::BOOL,
            ),
+ 5 => array(
+ 'var' => 'queryTimeout',
+ 'type' => TType::I64,
+ ),
          );
      }
      if (is_array($vals)) {
@@ -5490,6 +5500,9 @@ class TExecuteStatementReq {
        if (isset($vals['runAsync'])) {
          $this->runAsync = $vals['runAsync'];
        }
+ if (isset($vals['queryTimeout'])) {
+ $this->queryTimeout = $vals['queryTimeout'];
+ }
      }
    }

@@ -5554,6 +5567,13 @@ class TExecuteStatementReq {
              $xfer += $input->skip($ftype);
            }
            break;
+ case 5:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->queryTimeout);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
          default:
            $xfer += $input->skip($ftype);
            break;
@@ -5603,6 +5623,11 @@ class TExecuteStatementReq {
        $xfer += $output->writeBool($this->runAsync);
        $xfer += $output->writeFieldEnd();
      }
+ if ($this->queryTimeout !== null) {
+ $xfer += $output->writeFieldBegin('queryTimeout', TType::I64, 5);
+ $xfer += $output->writeI64($this->queryTimeout);
+ $xfer += $output->writeFieldEnd();
+ }
      $xfer += $output->writeFieldStop();
      $xfer += $output->writeStructEnd();
      return $xfer;

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py b/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py
index 3bb20b8..44e5462 100644
--- a/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py
+++ b/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py
@@ -154,6 +154,7 @@ class TOperationState:
    ERROR_STATE = 5
    UKNOWN_STATE = 6
    PENDING_STATE = 7
+ TIMEDOUT_STATE = 8

    _VALUES_TO_NAMES = {
      0: "INITIALIZED_STATE",
@@ -164,6 +165,7 @@ class TOperationState:
      5: "ERROR_STATE",
      6: "UKNOWN_STATE",
      7: "PENDING_STATE",
+ 8: "TIMEDOUT_STATE",
    }

    _NAMES_TO_VALUES = {
@@ -175,6 +177,7 @@ class TOperationState:
      "ERROR_STATE": 5,
      "UKNOWN_STATE": 6,
      "PENDING_STATE": 7,
+ "TIMEDOUT_STATE": 8,
    }

  class TOperationType:
@@ -4162,6 +4165,7 @@ class TExecuteStatementReq:
     - statement
     - confOverlay
     - runAsync
+ - queryTimeout
    """

    thrift_spec = (
@@ -4170,13 +4174,15 @@ class TExecuteStatementReq:
      (2, TType.STRING, 'statement', None, None, ), # 2
      (3, TType.MAP, 'confOverlay', (TType.STRING,None,TType.STRING,None), None, ), # 3
      (4, TType.BOOL, 'runAsync', None, False, ), # 4
+ (5, TType.I64, 'queryTimeout', None, 0, ), # 5
    )

- def __init__(self, sessionHandle=None, statement=None, confOverlay=None, runAsync=thrift_spec[4][4],):
+ def __init__(self, sessionHandle=None, statement=None, confOverlay=None, runAsync=thrift_spec[4][4], queryTimeout=thrift_spec[5][4],):
      self.sessionHandle = sessionHandle
      self.statement = statement
      self.confOverlay = confOverlay
      self.runAsync = runAsync
+ self.queryTimeout = queryTimeout

    def read(self, iprot):
      if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -4214,6 +4220,11 @@ class TExecuteStatementReq:
            self.runAsync = iprot.readBool()
          else:
            iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.I64:
+ self.queryTimeout = iprot.readI64()
+ else:
+ iprot.skip(ftype)
        else:
          iprot.skip(ftype)
        iprot.readFieldEnd()
@@ -4244,6 +4255,10 @@ class TExecuteStatementReq:
        oprot.writeFieldBegin('runAsync', TType.BOOL, 4)
        oprot.writeBool(self.runAsync)
        oprot.writeFieldEnd()
+ if self.queryTimeout is not None:
+ oprot.writeFieldBegin('queryTimeout', TType.I64, 5)
+ oprot.writeI64(self.queryTimeout)
+ oprot.writeFieldEnd()
      oprot.writeFieldStop()
      oprot.writeStructEnd()

@@ -4261,6 +4276,7 @@ class TExecuteStatementReq:
      value = (value * 31) ^ hash(self.statement)
      value = (value * 31) ^ hash(self.confOverlay)
      value = (value * 31) ^ hash(self.runAsync)
+ value = (value * 31) ^ hash(self.queryTimeout)
      return value

    def __repr__(self):

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb b/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb
index 7208bae..b39ec1e 100644
--- a/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb
+++ b/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb
@@ -65,8 +65,9 @@ module TOperationState
    ERROR_STATE = 5
    UKNOWN_STATE = 6
    PENDING_STATE = 7
- VALUE_MAP = {0 => "INITIALIZED_STATE", 1 => "RUNNING_STATE", 2 => "FINISHED_STATE", 3 => "CANCELED_STATE", 4 => "CLOSED_STATE", 5 => "ERROR_STATE", 6 => "UKNOWN_STATE", 7 => "PENDING_STATE"}
- VALID_VALUES = Set.new([INITIALIZED_STATE, RUNNING_STATE, FINISHED_STATE, CANCELED_STATE, CLOSED_STATE, ERROR_STATE, UKNOWN_STATE, PENDING_STATE]).freeze
+ TIMEDOUT_STATE = 8
+ VALUE_MAP = {0 => "INITIALIZED_STATE", 1 => "RUNNING_STATE", 2 => "FINISHED_STATE", 3 => "CANCELED_STATE", 4 => "CLOSED_STATE", 5 => "ERROR_STATE", 6 => "UKNOWN_STATE", 7 => "PENDING_STATE", 8 => "TIMEDOUT_STATE"}
+ VALID_VALUES = Set.new([INITIALIZED_STATE, RUNNING_STATE, FINISHED_STATE, CANCELED_STATE, CLOSED_STATE, ERROR_STATE, UKNOWN_STATE, PENDING_STATE, TIMEDOUT_STATE]).freeze
  end

  module TOperationType
@@ -1135,12 +1136,14 @@ class TExecuteStatementReq
    STATEMENT = 2
    CONFOVERLAY = 3
    RUNASYNC = 4
+ QUERYTIMEOUT = 5

    FIELDS = {
      SESSIONHANDLE => {:type => ::Thrift::Types::STRUCT, :name => 'sessionHandle', :class => ::TSessionHandle},
      STATEMENT => {:type => ::Thrift::Types::STRING, :name => 'statement'},
      CONFOVERLAY => {:type => ::Thrift::Types::MAP, :name => 'confOverlay', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}, :optional => true},
- RUNASYNC => {:type => ::Thrift::Types::BOOL, :name => 'runAsync', :default => false, :optional => true}
+ RUNASYNC => {:type => ::Thrift::Types::BOOL, :name => 'runAsync', :default => false, :optional => true},
+ QUERYTIMEOUT => {:type => ::Thrift::Types::I64, :name => 'queryTimeout', :default => 0, :optional => true}
    }

    def struct_fields; FIELDS; end

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/CLIService.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/CLIService.java b/service/src/java/org/apache/hive/service/cli/CLIService.java
index 4a83e38..ed52b4a 100644
--- a/service/src/java/org/apache/hive/service/cli/CLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/CLIService.java
@@ -248,33 +248,55 @@ public class CLIService extends CompositeService implements ICLIService {
      return infoValue;
    }

- /* (non-Javadoc)
- * @see org.apache.hive.service.cli.ICLIService#executeStatement(org.apache.hive.service.cli.SessionHandle,
- * java.lang.String, java.util.Map)
+ /**
+ * Execute statement on the server. This is a blocking call.
     */
    @Override
    public OperationHandle executeStatement(SessionHandle sessionHandle, String statement,
- Map<String, String> confOverlay)
- throws HiveSQLException {
- OperationHandle opHandle = sessionManager.getSession(sessionHandle)
- .executeStatement(statement, confOverlay);
+ Map<String, String> confOverlay) throws HiveSQLException {
+ OperationHandle opHandle =
+ sessionManager.getSession(sessionHandle).executeStatement(statement, confOverlay);
      LOG.debug(sessionHandle + ": executeStatement()");
      return opHandle;
    }

- /* (non-Javadoc)
- * @see org.apache.hive.service.cli.ICLIService#executeStatementAsync(org.apache.hive.service.cli.SessionHandle,
- * java.lang.String, java.util.Map)
+ /**
+ * Execute statement on the server with a timeout. This is a blocking call.
+ */
+ @Override
+ public OperationHandle executeStatement(SessionHandle sessionHandle, String statement,
+ Map<String, String> confOverlay, long queryTimeout) throws HiveSQLException {
+ OperationHandle opHandle =
+ sessionManager.getSession(sessionHandle).executeStatement(statement, confOverlay,
+ queryTimeout);
+ LOG.debug(sessionHandle + ": executeStatement()");
+ return opHandle;
+ }
+
+ /**
+ * Execute statement asynchronously on the server. This is a non-blocking call
     */
    @Override
    public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement,
        Map<String, String> confOverlay) throws HiveSQLException {
- OperationHandle opHandle = sessionManager.getSession(sessionHandle)
- .executeStatementAsync(statement, confOverlay);
+ OperationHandle opHandle =
+ sessionManager.getSession(sessionHandle).executeStatementAsync(statement, confOverlay);
      LOG.debug(sessionHandle + ": executeStatementAsync()");
      return opHandle;
    }

+ /**
+ * Execute statement asynchronously on the server with a timeout. This is a non-blocking call
+ */
+ @Override
+ public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement,
+ Map<String, String> confOverlay, long queryTimeout) throws HiveSQLException {
+ OperationHandle opHandle =
+ sessionManager.getSession(sessionHandle).executeStatementAsync(statement, confOverlay,
+ queryTimeout);
+ LOG.debug(sessionHandle + ": executeStatementAsync()");
+ return opHandle;
+ }

    /* (non-Javadoc)
     * @see org.apache.hive.service.cli.ICLIService#getTypeInfo(org.apache.hive.service.cli.SessionHandle)

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java b/service/src/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java
index 79e0024..86e9bb1 100644
--- a/service/src/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java
+++ b/service/src/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java
@@ -67,26 +67,29 @@ public class EmbeddedCLIServiceClient extends CLIServiceClient {
      return cliService.getInfo(sessionHandle, getInfoType);
    }

- /* (non-Javadoc)
- * @see org.apache.hive.service.cli.CLIServiceClient#executeStatement(org.apache.hive.service.cli.SessionHandle,
- * java.lang.String, java.util.Map)
- */
    @Override
    public OperationHandle executeStatement(SessionHandle sessionHandle, String statement,
        Map<String, String> confOverlay) throws HiveSQLException {
      return cliService.executeStatement(sessionHandle, statement, confOverlay);
    }

- /* (non-Javadoc)
- * @see org.apache.hive.service.cli.CLIServiceClient#executeStatementAsync(org.apache.hive.service.cli.SessionHandle,
- * java.lang.String, java.util.Map)
- */
+ @Override
+ public OperationHandle executeStatement(SessionHandle sessionHandle, String statement,
+ Map<String, String> confOverlay, long queryTimeout) throws HiveSQLException {
+ return cliService.executeStatement(sessionHandle, statement, confOverlay, queryTimeout);
+ }
+
    @Override
    public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement,
        Map<String, String> confOverlay) throws HiveSQLException {
      return cliService.executeStatementAsync(sessionHandle, statement, confOverlay);
    }

+ @Override
+ public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement,
+ Map<String, String> confOverlay, long queryTimeout) throws HiveSQLException {
+ return cliService.executeStatementAsync(sessionHandle, statement, confOverlay, queryTimeout);
+ }

    /* (non-Javadoc)
     * @see org.apache.hive.service.cli.CLIServiceClient#getTypeInfo(org.apache.hive.service.cli.SessionHandle)

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/ICLIService.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/ICLIService.java b/service/src/java/org/apache/hive/service/cli/ICLIService.java
index e4aef96..fef772d 100644
--- a/service/src/java/org/apache/hive/service/cli/ICLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/ICLIService.java
@@ -39,12 +39,16 @@ public interface ICLIService {
        throws HiveSQLException;

    OperationHandle executeStatement(SessionHandle sessionHandle, String statement,
- Map<String, String> confOverlay)
- throws HiveSQLException;
+ Map<String, String> confOverlay) throws HiveSQLException;

- OperationHandle executeStatementAsync(SessionHandle sessionHandle,
- String statement, Map<String, String> confOverlay)
- throws HiveSQLException;
+ OperationHandle executeStatement(SessionHandle sessionHandle, String statement,
+ Map<String, String> confOverlay, long queryTimeout) throws HiveSQLException;
+
+ OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement,
+ Map<String, String> confOverlay) throws HiveSQLException;
+
+ OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement,
+ Map<String, String> confOverlay, long queryTimeout) throws HiveSQLException;

    OperationHandle getTypeInfo(SessionHandle sessionHandle)
        throws HiveSQLException;
@@ -105,6 +109,4 @@ public interface ICLIService {
      String primaryCatalog, String primarySchema, String primaryTable,
      String foreignCatalog, String foreignSchema, String foreignTable)
      throws HiveSQLException;
-
-
  }

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/OperationState.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/OperationState.java b/service/src/java/org/apache/hive/service/cli/OperationState.java
index 6a67a1d..ae1ff5e 100644
--- a/service/src/java/org/apache/hive/service/cli/OperationState.java
+++ b/service/src/java/org/apache/hive/service/cli/OperationState.java
@@ -32,7 +32,8 @@ public enum OperationState {
    CLOSED(TOperationState.CLOSED_STATE, true),
    ERROR(TOperationState.ERROR_STATE, true),
    UNKNOWN(TOperationState.UKNOWN_STATE, false),
- PENDING(TOperationState.PENDING_STATE, false);
+ PENDING(TOperationState.PENDING_STATE, false),
+ TIMEDOUT(TOperationState.TIMEDOUT_STATE, true);

    private final TOperationState tOperationState;
    private final boolean terminal;
@@ -57,6 +58,7 @@ public enum OperationState {
        case RUNNING:
        case CANCELED:
        case CLOSED:
+ case TIMEDOUT:
          return;
        }
        break;
@@ -67,6 +69,7 @@ public enum OperationState {
        case CANCELED:
        case ERROR:
        case CLOSED:
+ case TIMEDOUT:
          return;
        }
        break;
@@ -76,11 +79,13 @@ public enum OperationState {
        case CANCELED:
        case ERROR:
        case CLOSED:
+ case TIMEDOUT:
          return;
        }
        break;
      case FINISHED:
      case CANCELED:
+ case TIMEDOUT:
      case ERROR:
        if (OperationState.CLOSED.equals(newState)) {
          return;

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java b/service/src/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
index b3d9b52..ff46ed8 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
@@ -39,9 +39,9 @@ public abstract class ExecuteStatementOperation extends Operation {
      return statement;
    }

- public static ExecuteStatementOperation newExecuteStatementOperation(
- HiveSession parentSession, String statement, Map<String, String> confOverlay, boolean runAsync)
- throws HiveSQLException {
+ public static ExecuteStatementOperation newExecuteStatementOperation(HiveSession parentSession,
+ String statement, Map<String, String> confOverlay, boolean runAsync, long queryTimeout)
+ throws HiveSQLException {
      String[] tokens = statement.trim().split("\\s+");
      CommandProcessor processor = null;
      try {
@@ -50,7 +50,8 @@ public abstract class ExecuteStatementOperation extends Operation {
        throw new HiveSQLException(e.getMessage(), e.getSQLState(), e);
      }
      if (processor == null) {
- return new SQLOperation(parentSession, statement, confOverlay, runAsync);
+ // runAsync, queryTimeout makes sense only for a SQLOperation
+ return new SQLOperation(parentSession, statement, confOverlay, runAsync, queryTimeout);
      }
      return new HiveCommandOperation(parentSession, statement, processor, confOverlay);
    }

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java b/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
index f18dc67..8f08c2e 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
@@ -229,4 +229,9 @@ public class HiveCommandOperation extends ExecuteStatementOperation {
        resultReader = null;
      }
    }
+
+ @Override
+ public void cancel(OperationState stateAfterCancel) throws HiveSQLException {
+ throw new UnsupportedOperationException("HiveCommandOperation.cancel()");
+ }
  }

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java b/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java
index 77228fa..fd6e428 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java
@@ -109,7 +109,7 @@ public abstract class MetadataOperation extends Operation {
      pattern = replaceAll(pattern, "^_", ".");
      return pattern;
    }
-
+
    private String replaceAll(String input, final String pattern, final String replace) {
      while (true) {
        String replaced = input.replaceAll(pattern, replace);
@@ -145,4 +145,9 @@ public abstract class MetadataOperation extends Operation {
      }
    }

+ @Override
+ public void cancel(OperationState stateAfterCancel) throws HiveSQLException {
+ throw new UnsupportedOperationException("MetadataOperation.cancel()");
+ }
+
  }

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/operation/Operation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/Operation.java b/service/src/java/org/apache/hive/service/cli/operation/Operation.java
index 63b1a48..0932884 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/Operation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/Operation.java
@@ -327,22 +327,23 @@ public abstract class Operation {
      }
    }

- protected void cleanupOperationLog() {
+ protected synchronized void cleanupOperationLog() {
      if (isOperationLogEnabled) {
+ if (opHandle == null) {
+ LOG.warn("Operation seems to be in invalid state, opHandle is null");
+ return;
+ }
        if (operationLog == null) {
- LOG.error("Operation [ " + opHandle.getHandleIdentifier() + " ] "
- + "logging is enabled, but its OperationLog object cannot be found.");
+ LOG.warn("Operation [ " + opHandle.getHandleIdentifier() + " ] " + "logging is enabled, "
+ + "but its OperationLog object cannot be found. "
+ + "Perhaps the operation has already terminated.");
        } else {
          operationLog.close();
        }
      }
    }

- // TODO: make this abstract and implement in subclasses.
- public void cancel() throws HiveSQLException {
- setState(OperationState.CANCELED);
- throw new UnsupportedOperationException("SQLOperation.cancel()");
- }
+ public abstract void cancel(OperationState stateAfterCancel) throws HiveSQLException;

    public abstract void close() throws HiveSQLException;


http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
index 52e4b4d..2f18231 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
@@ -90,12 +90,10 @@ public class OperationManager extends AbstractService {
    @Override
    public synchronized void start() {
      super.start();
- // TODO
    }

    @Override
    public synchronized void stop() {
- // TODO
      super.stop();
    }

@@ -111,10 +109,11 @@ public class OperationManager extends AbstractService {
    }

    public ExecuteStatementOperation newExecuteStatementOperation(HiveSession parentSession,
- String statement, Map<String, String> confOverlay, boolean runAsync)
- throws HiveSQLException {
- ExecuteStatementOperation executeStatementOperation = ExecuteStatementOperation
- .newExecuteStatementOperation(parentSession, statement, confOverlay, runAsync);
+ String statement, Map<String, String> confOverlay, boolean runAsync, long queryTimeout)
+ throws HiveSQLException {
+ ExecuteStatementOperation executeStatementOperation =
+ ExecuteStatementOperation.newExecuteStatementOperation(parentSession, statement,
+ confOverlay, runAsync, queryTimeout);
      addOperation(executeStatementOperation);
      return executeStatementOperation;
    }
@@ -250,20 +249,20 @@ public class OperationManager extends AbstractService {
      return getOperation(opHandle).getStatus();
    }

+ /**
+ * Cancel the running operation unless it is already in a terminal state
+ * @param opHandle
+ * @throws HiveSQLException
+ */
    public void cancelOperation(OperationHandle opHandle) throws HiveSQLException {
      Operation operation = getOperation(opHandle);
      OperationState opState = operation.getStatus().getState();
- if (opState == OperationState.CANCELED ||
- opState == OperationState.CLOSED ||
- opState == OperationState.FINISHED ||
- opState == OperationState.ERROR ||
- opState == OperationState.UNKNOWN) {
+ if (opState.isTerminal()) {
        // Cancel should be a no-op in either cases
        LOG.debug(opHandle + ": Operation is already aborted in state - " + opState);
- }
- else {
+ } else {
        LOG.debug(opHandle + ": Attempting to cancel from state - " + opState);
- operation.cancel();
+ operation.cancel(OperationState.CANCELED);
      }
    }
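
The explicit enumeration of CANCELED/CLOSED/FINISHED/ERROR/UNKNOWN above is folded into OperationState.isTerminal(), which is added elsewhere in this commit (OperationState.java appears in the file listing, but its hunk is not reproduced here). A minimal sketch of what that check amounts to, mirroring the states the old condition listed plus the new TIMEDOUT state this patch introduces:

   // Sketch of a terminal-state helper on an OperationState-like enum; the real
   // method lives in org.apache.hive.service.cli.OperationState, modified by this commit.
   public enum OperationStateSketch {
     INITIALIZED, PENDING, RUNNING, FINISHED, CANCELED, CLOSED, ERROR, UNKNOWN, TIMEDOUT;

     public boolean isTerminal() {
       // Once in any of these states, cancelOperation() treats a cancel request as a no-op.
       return this == CANCELED || this == CLOSED || this == FINISHED
           || this == ERROR || this == UNKNOWN || this == TIMEDOUT;
     }
   }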


http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
index 01dd48c..67e0e52 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -18,12 +18,24 @@

  package org.apache.hive.service.cli.operation;

-import java.io.*;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.Serializable;
+import java.io.UnsupportedEncodingException;
  import java.security.PrivilegedExceptionAction;
  import java.sql.SQLException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
  import java.util.concurrent.Future;
  import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
  import java.util.concurrent.atomic.AtomicInteger;

  import org.apache.commons.codec.binary.Base64;
@@ -84,9 +96,10 @@ public class SQLOperation extends ExecuteStatementOperation {
    private SerDe serde = null;
    private boolean fetchStarted = false;
    private volatile MetricsScope currentSQLStateScope;
-
- //Display for WebUI.
+ // Display for WebUI.
    private SQLOperationDisplay sqlOpDisplay;
+ private long queryTimeout;
+ private ScheduledExecutorService timeoutExecutor;

    /**
     * A map to track query count running by each user
@@ -94,10 +107,11 @@ public class SQLOperation extends ExecuteStatementOperation {
    private static Map<String, AtomicInteger> userQueries = new HashMap<String, AtomicInteger>();
    private static final String ACTIVE_SQL_USER = MetricsConstant.SQL_OPERATION_PREFIX + "active_user";

- public SQLOperation(HiveSession parentSession, String statement, Map<String,
- String> confOverlay, boolean runInBackground) {
+ public SQLOperation(HiveSession parentSession, String statement, Map<String, String> confOverlay,
+ boolean runInBackground, long queryTimeout) {
      // TODO: call setRemoteUser in ExecuteStatementOperation or higher.
      super(parentSession, statement, confOverlay, runInBackground);
+ this.queryTimeout = queryTimeout;
      setupSessionIO(parentSession.getSessionState());
      try {
        sqlOpDisplay = new SQLOperationDisplay(this);
@@ -121,7 +135,7 @@ public class SQLOperation extends ExecuteStatementOperation {
        }
      }

- /***
+ /**
     * Compile the query and extract metadata
     * @param sqlOperationConf
     * @throws HiveSQLException
@@ -130,6 +144,29 @@ public class SQLOperation extends ExecuteStatementOperation {
      setState(OperationState.RUNNING);
      try {
        driver = new Driver(queryState, getParentSession().getUserName());
+
+ // Start the timer thread for cancelling the query when query timeout is reached
+ // queryTimeout == 0 means no timeout
+ if (queryTimeout > 0) {
+ timeoutExecutor = new ScheduledThreadPoolExecutor(1);
+ Runnable timeoutTask = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ LOG.info("Query timed out after: " + queryTimeout
+ + " seconds. Cancelling the execution now.");
+ SQLOperation.this.cancel(OperationState.TIMEDOUT);
+ } catch (HiveSQLException e) {
+ LOG.error("Error cancelling the query after timeout: " + queryTimeout + " seconds", e);
+ } finally {
+ // Stop
+ timeoutExecutor.shutdown();
+ }
+ }
+ };
+ timeoutExecutor.schedule(timeoutTask, queryTimeout, TimeUnit.SECONDS);
+ }
+
        sqlOpDisplay.setQueryDisplay(driver.getQueryDisplay());

        // set the operation handle information in Driver, so that thrift API users
@@ -184,6 +221,13 @@ public class SQLOperation extends ExecuteStatementOperation {

    private void runQuery() throws HiveSQLException {
      try {
+ OperationState opState = getStatus().getState();
+ // Operation may have been cancelled by another thread
+ if (opState.isTerminal()) {
+ LOG.info("Not running the query. Operation is already in terminal state: " + opState
+ + ", perhaps cancelled due to query timeout or by another thread.");
+ return;
+ }
        // In Hive server mode, we are not able to retry in the FetchTask
        // case, when calling fetch queries since execute() has returned.
        // For now, we disable the test attempts.
@@ -193,14 +237,16 @@ public class SQLOperation extends ExecuteStatementOperation {
          throw toSQLException("Error while processing statement", response);
        }
      } catch (HiveSQLException e) {
- // If the operation was cancelled by another thread,
- // Driver#run will return a non-zero response code.
- // We will simply return if the operation state is CANCELED,
- // otherwise throw an exception
- if (getStatus().getState() == OperationState.CANCELED) {
+ /**
+ * If the operation was cancelled by another thread, or the execution timed out, Driver#run
+ * may return a non-zero response code. We will simply return if the operation state is
+ * CANCELED, TIMEDOUT or CLOSED, otherwise throw an exception
+ */
+ if ((getStatus().getState() == OperationState.CANCELED)
+ || (getStatus().getState() == OperationState.TIMEDOUT)
+ || (getStatus().getState() == OperationState.CLOSED)) {
          return;
- }
- else {
+ } else {
          setState(OperationState.ERROR);
          throw e;
        }
@@ -312,8 +358,22 @@ public class SQLOperation extends ExecuteStatementOperation {
      }
    }

- private void cleanup(OperationState state) throws HiveSQLException {
+ private synchronized void cleanup(OperationState state) throws HiveSQLException {
      setState(state);
+ if (driver != null) {
+ driver.close();
+ driver.destroy();
+ }
+ driver = null;
+
+ SessionState ss = SessionState.get();
+ if (ss == null) {
+ LOG.warn("Operation seems to be in invalid state, SessionState is null");
+ } else {
+ ss.deleteTmpOutputFile();
+ ss.deleteTmpErrOutputFile();
+ }
+
      if (shouldRunAsync()) {
        Future<?> backgroundHandle = getBackgroundHandle();
        if (backgroundHandle != null) {
@@ -321,20 +381,16 @@ public class SQLOperation extends ExecuteStatementOperation {
        }
      }

- if (driver != null) {
- driver.close();
- driver.destroy();
+ // Shutdown the timeout thread if any, while closing this operation
+ if ((timeoutExecutor != null) && (state != OperationState.TIMEDOUT) && (state.isTerminal())) {
+ timeoutExecutor.shutdownNow();
      }
- driver = null;
-
- SessionState ss = SessionState.get();
- ss.deleteTmpOutputFile();
- ss.deleteTmpErrOutputFile();
    }

    @Override
- public void cancel() throws HiveSQLException {
- cleanup(OperationState.CANCELED);
+ public void cancel(OperationState stateAfterCancel) throws HiveSQLException {
+ cleanup(stateAfterCancel);
+ cleanupOperationLog();
    }

    @Override
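
The timer wiring added in the hunk above, together with the executor shutdown in cleanup(), is a standard schedule-then-cancel pattern. A self-contained sketch of that pattern outside of Hive (class and variable names are illustrative, not Hive APIs): schedule a one-shot task that cancels long-running work after a deadline, and shut the scheduler down once the work completes or is cancelled:

   import java.util.concurrent.CancellationException;
   import java.util.concurrent.ExecutorService;
   import java.util.concurrent.Executors;
   import java.util.concurrent.Future;
   import java.util.concurrent.ScheduledExecutorService;
   import java.util.concurrent.ScheduledThreadPoolExecutor;
   import java.util.concurrent.TimeUnit;

   public class TimeoutPatternSketch {
     public static void main(String[] args) throws Exception {
       ExecutorService worker = Executors.newSingleThreadExecutor();
       ScheduledExecutorService timeoutExecutor = new ScheduledThreadPoolExecutor(1);

       // The "query": deliberately runs longer than the timeout allows.
       Future<?> work = worker.submit(() -> {
         try {
           TimeUnit.SECONDS.sleep(10);
         } catch (InterruptedException e) {
           Thread.currentThread().interrupt(); // cancellation path
         }
       });

       long queryTimeout = 2; // seconds; 0 would mean "no timeout", as in SQLOperation
       timeoutExecutor.schedule(() -> {
         System.out.println("Timed out after " + queryTimeout + "s, cancelling");
         work.cancel(true); // stands in for SQLOperation.cancel(OperationState.TIMEDOUT)
       }, queryTimeout, TimeUnit.SECONDS);

       try {
         work.get();
       } catch (CancellationException e) {
         System.out.println("Work was cancelled by the timeout task");
       } finally {
         timeoutExecutor.shutdownNow(); // mirrors cleanup() shutting the timer down
         worker.shutdownNow();
       }
     }
   }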

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/session/HiveSession.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSession.java b/service/src/java/org/apache/hive/service/cli/session/HiveSession.java
index 9ea643b..78ff388 100644
--- a/service/src/java/org/apache/hive/service/cli/session/HiveSession.java
+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSession.java
@@ -56,18 +56,38 @@ public interface HiveSession extends HiveSessionBase {
     * @return
     * @throws HiveSQLException
     */
- OperationHandle executeStatement(String statement,
- Map<String, String> confOverlay) throws HiveSQLException;
+ OperationHandle executeStatement(String statement, Map<String, String> confOverlay) throws HiveSQLException;

    /**
     * execute operation handler
     * @param statement
     * @param confOverlay
+ * @param queryTimeout
     * @return
     * @throws HiveSQLException
     */
- OperationHandle executeStatementAsync(String statement,
- Map<String, String> confOverlay) throws HiveSQLException;
+ OperationHandle executeStatement(String statement, Map<String, String> confOverlay,
+ long queryTimeout) throws HiveSQLException;
+
+ /**
+ * execute operation handler
+ * @param statement
+ * @param confOverlay
+ * @return
+ * @throws HiveSQLException
+ */
+ OperationHandle executeStatementAsync(String statement, Map<String, String> confOverlay) throws HiveSQLException;
+
+ /**
+ * execute operation handler
+ * @param statement
+ * @param confOverlay
+ * @param queryTimeout
+ * @return
+ * @throws HiveSQLException
+ */
+ OperationHandle executeStatementAsync(String statement, Map<String, String> confOverlay,
+ long queryTimeout) throws HiveSQLException;

    /**
     * getTypeInfo operation handler

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
index 0cfec7a..a0015eb 100644
--- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
@@ -204,7 +204,7 @@ public class HiveSessionImpl implements HiveSession {
        OperationHandle opHandle = null;
        try {
          //execute in sync mode
- opHandle = executeStatementInternal(cmd_trimed, null, false);
+ opHandle = executeStatementInternal(cmd_trimed, null, false, 0);
        } catch (HiveSQLException e) {
          LOG.warn("Failed to execute command in global .hiverc file.", e);
          return -1;
@@ -426,33 +426,43 @@ public class HiveSessionImpl implements HiveSession {
    }

    @Override
- public OperationHandle executeStatement(String statement, Map<String, String> confOverlay)
- throws HiveSQLException {
- return executeStatementInternal(statement, confOverlay, false);
+ public OperationHandle executeStatement(String statement, Map<String, String> confOverlay) throws HiveSQLException {
+ return executeStatementInternal(statement, confOverlay, false, 0);
    }

    @Override
- public OperationHandle executeStatementAsync(String statement, Map<String, String> confOverlay)
- throws HiveSQLException {
- return executeStatementInternal(statement, confOverlay, true);
+ public OperationHandle executeStatement(String statement, Map<String, String> confOverlay,
+ long queryTimeout) throws HiveSQLException {
+ return executeStatementInternal(statement, confOverlay, false, queryTimeout);
    }

- private OperationHandle executeStatementInternal(String statement, Map<String, String> confOverlay,
- boolean runAsync)
- throws HiveSQLException {
+ @Override
+ public OperationHandle executeStatementAsync(String statement, Map<String, String> confOverlay) throws HiveSQLException {
+ return executeStatementInternal(statement, confOverlay, true, 0);
+ }
+
+ @Override
+ public OperationHandle executeStatementAsync(String statement, Map<String, String> confOverlay,
+ long queryTimeout) throws HiveSQLException {
+ return executeStatementInternal(statement, confOverlay, true, queryTimeout);
+ }
+
+ private OperationHandle executeStatementInternal(String statement,
+ Map<String, String> confOverlay, boolean runAsync, long queryTimeout) throws HiveSQLException {
      acquire(true);

      OperationManager operationManager = getOperationManager();
- ExecuteStatementOperation operation = operationManager
- .newExecuteStatementOperation(getSession(), statement, confOverlay, runAsync);
+ ExecuteStatementOperation operation =
+ operationManager.newExecuteStatementOperation(getSession(), statement, confOverlay,
+ runAsync, queryTimeout);
      OperationHandle opHandle = operation.getHandle();
      try {
        operation.run();
        addOpHandle(opHandle);
        return opHandle;
      } catch (HiveSQLException e) {
- // Refering to SQLOperation.java,there is no chance that a HiveSQLException throws and the asyn
- // background operation submits to thread pool successfully at the same time. So, Cleanup
+ // Refering to SQLOperation.java, there is no chance that a HiveSQLException throws and the
+ // async background operation submits to thread pool successfully at the same time. So, Cleanup
        // opHandle directly when got HiveSQLException
        operationManager.closeOperation(opHandle);
        throw e;

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java b/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java
index b2e0e9e..933750b 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java
@@ -126,20 +126,30 @@ public class RetryingThriftCLIServiceClient implements InvocationHandler {
      }

      @Override
- public OperationHandle executeStatement(SessionHandle sessionHandle,
- String statement,
- Map<String, String> confOverlay) throws HiveSQLException {
+ public OperationHandle executeStatement(SessionHandle sessionHandle, String statement,
+ Map<String, String> confOverlay) throws HiveSQLException {
        return cliService.executeStatement(sessionHandle, statement, confOverlay);
      }

      @Override
- public OperationHandle executeStatementAsync(SessionHandle sessionHandle,
- String statement,
- Map<String, String> confOverlay) throws HiveSQLException {
+ public OperationHandle executeStatement(SessionHandle sessionHandle, String statement,
+ Map<String, String> confOverlay, long queryTimeout) throws HiveSQLException {
+ return cliService.executeStatement(sessionHandle, statement, confOverlay, queryTimeout);
+ }
+
+ @Override
+ public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement,
+ Map<String, String> confOverlay) throws HiveSQLException {
        return cliService.executeStatementAsync(sessionHandle, statement, confOverlay);
      }

      @Override
+ public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement,
+ Map<String, String> confOverlay, long queryTimeout) throws HiveSQLException {
+ return cliService.executeStatementAsync(sessionHandle, statement, confOverlay, queryTimeout);
+ }
+
+ @Override
      public OperationHandle getTypeInfo(SessionHandle sessionHandle) throws HiveSQLException {
        return cliService.getTypeInfo(sessionHandle);
      }

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
index 6ede1d7..5464e58 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
@@ -493,15 +493,17 @@ public abstract class ThriftCLIService extends AbstractService implements TCLISe
        String statement = req.getStatement();
        Map<String, String> confOverlay = req.getConfOverlay();
        Boolean runAsync = req.isRunAsync();
- OperationHandle operationHandle = runAsync ?
- cliService.executeStatementAsync(sessionHandle, statement, confOverlay)
- : cliService.executeStatement(sessionHandle, statement, confOverlay);
- resp.setOperationHandle(operationHandle.toTOperationHandle());
- resp.setStatus(OK_STATUS);
+ long queryTimeout = req.getQueryTimeout();
+ OperationHandle operationHandle =
+ runAsync ? cliService.executeStatementAsync(sessionHandle, statement, confOverlay,
+ queryTimeout) : cliService.executeStatement(sessionHandle, statement, confOverlay,
+ queryTimeout);
+ resp.setOperationHandle(operationHandle.toTOperationHandle());
+ resp.setStatus(OK_STATUS);
      } catch (Exception e) {
        // Note: it's rather important that this (and other methods) catch Exception, not Throwable;
- // in combination with HiveSessionProxy.invoke code, perhaps unintentionally, it used
- // to also catch all errors; and now it allows OOMs only to propagate.
+ // in combination with HiveSessionProxy.invoke code, perhaps unintentionally, it used
+ // to also catch all errors; and now it allows OOMs only to propagate.
        LOG.warn("Error executing statement: ", e);
        resp.setStatus(HiveSQLException.toTStatus(e));
      }

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java
index 098aea6..82ac42d 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java
@@ -166,34 +166,38 @@ public class ThriftCLIServiceClient extends CLIServiceClient {
      }
    }

- /* (non-Javadoc)
- * @see org.apache.hive.service.cli.ICLIService#executeStatement(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.util.Map)
- */
    @Override
    public OperationHandle executeStatement(SessionHandle sessionHandle, String statement,
- Map<String, String> confOverlay)
- throws HiveSQLException {
- return executeStatementInternal(sessionHandle, statement, confOverlay, false);
+ Map<String, String> confOverlay) throws HiveSQLException {
+ return executeStatementInternal(sessionHandle, statement, confOverlay, false, 0);
+ }
+
+ @Override
+ public OperationHandle executeStatement(SessionHandle sessionHandle, String statement,
+ Map<String, String> confOverlay, long queryTimeout) throws HiveSQLException {
+ return executeStatementInternal(sessionHandle, statement, confOverlay, false, queryTimeout);
    }

- /* (non-Javadoc)
- * @see org.apache.hive.service.cli.ICLIService#executeStatementAsync(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.util.Map)
- */
    @Override
    public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement,
- Map<String, String> confOverlay)
- throws HiveSQLException {
- return executeStatementInternal(sessionHandle, statement, confOverlay, true);
+ Map<String, String> confOverlay) throws HiveSQLException {
+ return executeStatementInternal(sessionHandle, statement, confOverlay, true, 0);
+ }
+
+ @Override
+ public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement,
+ Map<String, String> confOverlay, long queryTimeout) throws HiveSQLException {
+ return executeStatementInternal(sessionHandle, statement, confOverlay, true, queryTimeout);
    }

    private OperationHandle executeStatementInternal(SessionHandle sessionHandle, String statement,
- Map<String, String> confOverlay, boolean isAsync)
- throws HiveSQLException {
+ Map<String, String> confOverlay, boolean isAsync, long queryTimeout) throws HiveSQLException {
      try {
        TExecuteStatementReq req =
            new TExecuteStatementReq(sessionHandle.toTSessionHandle(), statement);
        req.setConfOverlay(confOverlay);
        req.setRunAsync(isAsync);
+ req.setQueryTimeout(queryTimeout);
        TExecuteStatementResp resp = cliService.ExecuteStatement(req);
        checkStatus(resp.getStatus());
        TProtocolVersion protocol = sessionHandle.getProtocolVersion();
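
On the wire, the new knob is simply the field that req.setQueryTimeout(queryTimeout) populates above. A hedged sketch of what a raw Thrift caller might do with it, assuming an already-opened TCLIService.Client and a TSessionHandle from a prior OpenSession call (the helper name and its arguments are illustrative):

   import java.util.HashMap;
   import org.apache.hive.service.rpc.thrift.TCLIService;
   import org.apache.hive.service.rpc.thrift.TExecuteStatementReq;
   import org.apache.hive.service.rpc.thrift.TExecuteStatementResp;
   import org.apache.hive.service.rpc.thrift.TOperationHandle;
   import org.apache.hive.service.rpc.thrift.TSessionHandle;
   import org.apache.thrift.TException;

   public class ThriftTimeoutSketch {
     // 'client' and 'session' are assumed to come from an already-established
     // TCLIService connection; only the new queryTimeout field is shown here.
     static TOperationHandle executeWithTimeout(TCLIService.Client client, TSessionHandle session,
         String sql, long timeoutSeconds) throws TException {
       TExecuteStatementReq req = new TExecuteStatementReq(session, sql);
       req.setConfOverlay(new HashMap<String, String>());
       req.setRunAsync(true);
       req.setQueryTimeout(timeoutSeconds); // 0 means no timeout, as elsewhere in this patch
       TExecuteStatementResp resp = client.ExecuteStatement(req);
       // The returned handle can be polled with GetOperationStatus; a query that exceeds
       // its timeout should eventually report the new TIMEDOUT operation state.
       return resp.getOperationHandle();
     }
   }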

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java b/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
index 1740079..abb1ecf 100644
--- a/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
+++ b/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
@@ -178,8 +178,7 @@ public abstract class ThriftCLIServiceTest {

      // Execute another query
      queryString = "SELECT ID+1 FROM TEST_EXEC_THRIFT";
- OperationHandle opHandle = client.executeStatement(sessHandle,
- queryString, opConf);
+ OperationHandle opHandle = client.executeStatement(sessHandle, queryString, opConf);
      assertNotNull(opHandle);

      OperationStatus opStatus = client.getOperationStatus(opHandle);
@@ -229,8 +228,7 @@ public abstract class ThriftCLIServiceTest {
      // Execute another query
      queryString = "SELECT ID+1 FROM TEST_EXEC_ASYNC_THRIFT";
      System.out.println("Will attempt to execute: " + queryString);
- opHandle = client.executeStatementAsync(sessHandle,
- queryString, opConf);
+ opHandle = client.executeStatementAsync(sessHandle, queryString, opConf);
      assertNotNull(opHandle);

      // Poll on the operation status till the query is completed

http://git-wip-us.apache.org/repos/asf/hive/blob/b6218275/service/src/test/org/apache/hive/service/cli/thrift/ThriftCliServiceTestWithCookie.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/cli/thrift/ThriftCliServiceTestWithCookie.java b/service/src/test/org/apache/hive/service/cli/thrift/ThriftCliServiceTestWithCookie.java
index a1ef1fc..ab20c4c 100644
--- a/service/src/test/org/apache/hive/service/cli/thrift/ThriftCliServiceTestWithCookie.java
+++ b/service/src/test/org/apache/hive/service/cli/thrift/ThriftCliServiceTestWithCookie.java
@@ -200,8 +200,7 @@ public class ThriftCliServiceTestWithCookie {

      // Execute another query
      queryString = "SELECT ID+1 FROM TEST_EXEC_THRIFT";
- OperationHandle opHandle = client.executeStatement(sessHandle,
- queryString, opConf);
+ OperationHandle opHandle = client.executeStatement(sessHandle, queryString, opConf);
      assertNotNull(opHandle);

      OperationStatus opStatus = client.getOperationStatus(opHandle);
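
End to end, the point of HIVE-4924 is that a plain JDBC client can bound query execution time. A minimal usage sketch (connection URL, table name and timeout value are illustrative only): the timeout is set through the standard java.sql.Statement.setQueryTimeout(), and a query that exceeds it is cancelled server-side and should surface to the caller as a SQLTimeoutException:

   import java.sql.Connection;
   import java.sql.DriverManager;
   import java.sql.ResultSet;
   import java.sql.SQLTimeoutException;
   import java.sql.Statement;

   public class QueryTimeoutExample {
     public static void main(String[] args) throws Exception {
       // Illustrative connection URL and query; adjust to the actual deployment.
       try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
            Statement stmt = conn.createStatement()) {
         stmt.setQueryTimeout(5); // seconds; 0 means no timeout
         try (ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM some_large_table")) {
           while (rs.next()) {
             System.out.println(rs.getLong(1));
           }
         } catch (SQLTimeoutException e) {
           // The server cancels the operation once the timeout fires and the
           // driver reports the timeout to the caller.
           System.err.println("Query timed out: " + e.getMessage());
         }
       }
     }
   }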


  • Spena at May 6, 2016 at 8:42 pm
    Merge branch 'master' into llap


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bc75d72b
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bc75d72b
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bc75d72b

    Branch: refs/heads/java8
    Commit: bc75d72b8e5ed8a538cf69d3b0a7556150e4713e
    Parents: e073cce b621827
    Author: Jason Dere <jdere@hortonworks.com>
    Authored: Tue May 3 13:30:39 2016 -0700
    Committer: Jason Dere <jdere@hortonworks.com>
    Committed: Tue May 3 13:30:39 2016 -0700

    ----------------------------------------------------------------------
      .../apache/hadoop/hive/common/FileUtils.java | 19 +-
      .../org/apache/hadoop/hive/conf/HiveConf.java | 2 +-
      .../org/apache/hive/jdbc/TestJdbcDriver2.java | 43 +-
      .../cli/session/TestHiveSessionImpl.java | 2 +-
      .../test/resources/testconfiguration.properties | 4 +
      .../org/apache/hive/jdbc/HiveStatement.java | 20 +-
      .../llap/tezplugins/LlapTaskCommunicator.java | 47 +
      .../hive/metastore/HiveMetaStoreFsImpl.java | 11 +-
      .../java/org/apache/hadoop/hive/ql/Driver.java | 27 +-
      .../org/apache/hadoop/hive/ql/exec/DDLTask.java | 73 +-
      .../apache/hadoop/hive/ql/exec/MoveTask.java | 7 +-
      .../hive/ql/exec/vector/VectorExtractRow.java | 12 +-
      .../ql/exec/vector/VectorizationContext.java | 2 +-
      .../hadoop/hive/ql/history/HiveHistoryImpl.java | 8 +-
      .../ql/io/orc/ConvertTreeReaderFactory.java | 3750 +++++++++++++
      .../hadoop/hive/ql/io/orc/SchemaEvolution.java | 29 +-
      .../hive/ql/io/orc/TreeReaderFactory.java | 86 +-
      .../apache/hadoop/hive/ql/metadata/Hive.java | 27 +-
      .../hadoop/hive/ql/session/OperationLog.java | 8 +-
      .../hadoop/hive/ql/session/SessionState.java | 2 +-
      .../clientnegative/orc_replace_columns2.q | 5 +-
      .../clientnegative/orc_replace_columns2_acid.q | 5 +-
      .../clientnegative/orc_replace_columns3.q | 3 +
      .../clientnegative/orc_replace_columns3_acid.q | 3 +
      .../clientnegative/orc_type_promotion1.q | 7 +-
      .../clientnegative/orc_type_promotion1_acid.q | 7 +-
      .../clientnegative/orc_type_promotion2.q | 5 +-
      .../clientnegative/orc_type_promotion2_acid.q | 5 +-
      .../clientnegative/orc_type_promotion3.q | 5 +-
      .../clientnegative/orc_type_promotion3_acid.q | 5 +-
      .../clientpositive/orc_int_type_promotion.q | 4 +
      .../clientpositive/orc_schema_evolution.q | 2 +
      .../schema_evol_orc_acid_mapwork_part.q | 846 ++-
      .../schema_evol_orc_acid_mapwork_table.q | 804 ++-
      .../schema_evol_orc_acidvec_mapwork_part.q | 843 ++-
      .../schema_evol_orc_acidvec_mapwork_table.q | 801 ++-
      .../schema_evol_orc_nonvec_fetchwork_part.q | 831 ++-
      .../schema_evol_orc_nonvec_fetchwork_table.q | 824 ++-
      .../schema_evol_orc_nonvec_mapwork_part.q | 833 ++-
      ...a_evol_orc_nonvec_mapwork_part_all_complex.q | 162 +
      ...evol_orc_nonvec_mapwork_part_all_primitive.q | 481 ++
      .../schema_evol_orc_nonvec_mapwork_table.q | 824 ++-
      .../schema_evol_orc_vec_mapwork_part.q | 831 ++-
      ...hema_evol_orc_vec_mapwork_part_all_complex.q | 162 +
      ...ma_evol_orc_vec_mapwork_part_all_primitive.q | 481 ++
      .../schema_evol_orc_vec_mapwork_table.q | 819 ++-
      .../schema_evol_text_nonvec_mapwork_part.q | 5 +-
      ..._evol_text_nonvec_mapwork_part_all_complex.q | 5 +-
      ...vol_text_nonvec_mapwork_part_all_primitive.q | 5 +-
      .../schema_evol_text_nonvec_mapwork_table.q | 5 +-
      .../schema_evol_text_vec_mapwork_part.q | 2 +-
      ...ema_evol_text_vec_mapwork_part_all_complex.q | 2 +-
      ...a_evol_text_vec_mapwork_part_all_primitive.q | 2 +-
      .../schema_evol_text_vec_mapwork_table.q | 2 +-
      .../schema_evol_text_vecrow_mapwork_part.q | 2 +-
      ..._evol_text_vecrow_mapwork_part_all_complex.q | 2 +-
      ...vol_text_vecrow_mapwork_part_all_primitive.q | 2 +-
      .../schema_evol_text_vecrow_mapwork_table.q | 2 +-
      .../clientnegative/orc_replace_columns2.q.out | 13 +-
      .../orc_replace_columns2_acid.q.out | 13 +-
      .../clientnegative/orc_replace_columns3.q.out | 11 +-
      .../orc_replace_columns3_acid.q.out | 11 +-
      .../clientnegative/orc_type_promotion1.q.out | 13 +-
      .../orc_type_promotion1_acid.q.out | 13 +-
      .../clientnegative/orc_type_promotion2.q.out | 13 +-
      .../orc_type_promotion2_acid.q.out | 13 +-
      .../clientnegative/orc_type_promotion3.q.out | 11 +-
      .../clientnegative/orc_type_promotion3_acid.q | 18 +
      .../orc_type_promotion3_acid.q.out | 11 +-
      .../schema_evol_orc_acid_mapwork_part.q.out | 4319 ++++++++++++---
      .../schema_evol_orc_acid_mapwork_table.q.out | 3334 ++++++++++--
      .../schema_evol_orc_acidvec_mapwork_part.q.out | 4319 ++++++++++++---
      .../schema_evol_orc_acidvec_mapwork_table.q.out | 3334 ++++++++++--
      .../schema_evol_orc_nonvec_fetchwork_part.q.out | 4905 +++++++++++++++--
      ...schema_evol_orc_nonvec_fetchwork_table.q.out | 4367 +++++++++++++++-
      .../schema_evol_orc_nonvec_mapwork_part.q.out | 4909 +++++++++++++++--
      ...ol_orc_nonvec_mapwork_part_all_complex.q.out | 726 +++
      ..._orc_nonvec_mapwork_part_all_primitive.q.out | 2872 ++++++++++
      .../schema_evol_orc_nonvec_mapwork_table.q.out | 4367 +++++++++++++++-
      .../schema_evol_orc_vec_mapwork_part.q.out | 4929 ++++++++++++++++--
      ..._evol_orc_vec_mapwork_part_all_complex.q.out | 726 +++
      ...vol_orc_vec_mapwork_part_all_primitive.q.out | 2887 ++++++++++
      .../schema_evol_orc_vec_mapwork_table.q.out | 4391 +++++++++++++++-
      .../tez/schema_evol_orc_acid_mapwork_part.q.out | 4319 ++++++++++++---
      .../schema_evol_orc_acid_mapwork_table.q.out | 3334 ++++++++++--
      .../schema_evol_orc_acidvec_mapwork_part.q.out | 4319 ++++++++++++---
      .../schema_evol_orc_acidvec_mapwork_table.q.out | 3334 ++++++++++--
      .../schema_evol_orc_nonvec_fetchwork_part.q.out | 4449 ++++++++++++++--
      ...schema_evol_orc_nonvec_fetchwork_table.q.out | 3911 +++++++++++++-
      .../schema_evol_orc_nonvec_mapwork_part.q.out | 4453 ++++++++++++++--
      ...ol_orc_nonvec_mapwork_part_all_complex.q.out | 669 +++
      ..._orc_nonvec_mapwork_part_all_primitive.q.out | 2587 +++++++++
      .../schema_evol_orc_nonvec_mapwork_table.q.out | 3911 +++++++++++++-
      .../tez/schema_evol_orc_vec_mapwork_part.q.out | 4449 ++++++++++++++--
      ..._evol_orc_vec_mapwork_part_all_complex.q.out | 669 +++
      ...vol_orc_vec_mapwork_part_all_primitive.q.out | 2587 +++++++++
      .../tez/schema_evol_orc_vec_mapwork_table.q.out | 3911 +++++++++++++-
      service-rpc/if/TCLIService.thrift | 6 +
      .../gen/thrift/gen-cpp/TCLIService_types.cpp | 30 +-
      .../src/gen/thrift/gen-cpp/TCLIService_types.h | 15 +-
      .../rpc/thrift/TExecuteStatementReq.java | 109 +-
      .../service/rpc/thrift/TOperationState.java | 5 +-
      service-rpc/src/gen/thrift/gen-php/Types.php | 25 +
      .../src/gen/thrift/gen-py/TCLIService/ttypes.py | 18 +-
      .../gen/thrift/gen-rb/t_c_l_i_service_types.rb | 9 +-
      .../org/apache/hive/service/cli/CLIService.java | 46 +-
      .../service/cli/EmbeddedCLIServiceClient.java | 19 +-
      .../apache/hive/service/cli/ICLIService.java | 16 +-
      .../apache/hive/service/cli/OperationState.java | 7 +-
      .../operation/ExecuteStatementOperation.java | 9 +-
      .../cli/operation/HiveCommandOperation.java | 5 +
      .../cli/operation/MetadataOperation.java | 7 +-
      .../hive/service/cli/operation/Operation.java | 17 +-
      .../service/cli/operation/OperationManager.java | 27 +-
      .../service/cli/operation/SQLOperation.java | 106 +-
      .../hive/service/cli/session/HiveSession.java | 28 +-
      .../service/cli/session/HiveSessionImpl.java | 38 +-
      .../thrift/RetryingThriftCLIServiceClient.java | 22 +-
      .../service/cli/thrift/ThriftCLIService.java | 16 +-
      .../cli/thrift/ThriftCLIServiceClient.java | 32 +-
      .../cli/thrift/ThriftCLIServiceTest.java | 6 +-
      .../thrift/ThriftCliServiceTestWithCookie.java | 3 +-
      .../apache/hadoop/hive/shims/Hadoop23Shims.java | 148 -
      .../org/apache/hadoop/hive/io/HdfsUtils.java | 156 +-
      .../apache/hadoop/hive/shims/HadoopShims.java | 41 -
      .../hadoop/hive/shims/HadoopShimsSecure.java | 10 -
      126 files changed, 101719 insertions(+), 10497 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/bc75d72b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    ----------------------------------------------------------------------

    http://git-wip-us.apache.org/repos/asf/hive/blob/bc75d72b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
    ----------------------------------------------------------------------
  • Spena at May 6, 2016 at 8:42 pm
    HIVE-13442 : LLAP: refactor submit API to be amenable to signing (Sergey Shelukhin, reviewed by Siddharth Seth)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0b5c27fd
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0b5c27fd
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0b5c27fd

    Branch: refs/heads/java8
    Commit: 0b5c27fdd4fbf8861d4eefc207c2da3a6ceac23d
    Parents: b70efa4
    Author: Sergey Shelukhin <sershe@apache.org>
    Authored: Tue May 3 15:02:24 2016 -0700
    Committer: Sergey Shelukhin <sershe@apache.org>
    Committed: Tue May 3 15:23:39 2016 -0700

    ----------------------------------------------------------------------
      .../daemon/rpc/LlapDaemonProtocolProtos.java | 6836 +++++++++++-------
      .../apache/hadoop/hive/llap/tez/Converters.java | 84 +-
      .../src/protobuf/LlapDaemonProtocol.proto | 69 +-
      .../hadoop/hive/llap/tez/TestConverters.java | 51 +-
      .../llap/daemon/impl/ContainerRunnerImpl.java | 88 +-
      .../llap/daemon/impl/QueryFragmentInfo.java | 23 +-
      .../hadoop/hive/llap/daemon/impl/QueryInfo.java | 9 +-
      .../hive/llap/daemon/impl/QueryTracker.java | 18 +-
      .../llap/daemon/impl/TaskExecutorService.java | 8 +-
      .../llap/daemon/impl/TaskRunnerCallable.java | 77 +-
      .../daemon/impl/TaskExecutorTestHelpers.java | 42 +-
      .../TestFirstInFirstOutComparator.java | 27 +-
      .../llap/tezplugins/LlapTaskCommunicator.java | 31 +-
      13 files changed, 4504 insertions(+), 2859 deletions(-)
    ----------------------------------------------------------------------
  • Spena at May 6, 2016 at 8:42 pm
    HIVE-13213 make DbLockManger work for non-acid resources (Eugene Koifman, reviewed by Alan Gates)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b70efa44
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b70efa44
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b70efa44

    Branch: refs/heads/java8
    Commit: b70efa447d9ae5883315d88e84ad1262d371213d
    Parents: 47bf055
    Author: Eugene Koifman <ekoifman@hortonworks.com>
    Authored: Tue May 3 13:38:42 2016 -0700
    Committer: Eugene Koifman <ekoifman@hortonworks.com>
    Committed: Tue May 3 13:53:02 2016 -0700

    ----------------------------------------------------------------------
      .../hadoop/hive/metastore/txn/TxnHandler.java | 5 ++
      .../hadoop/hive/ql/lockmgr/DbTxnManager.java | 12 +++
      .../apache/hadoop/hive/ql/TestTxnCommands2.java | 22 ++++++
      .../hive/ql/lockmgr/TestDbTxnManager2.java | 81 ++++++++++++++++++++
      4 files changed, 120 insertions(+)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/b70efa44/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
    index c32b0b0..c0fa97a 100644
    --- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
    +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
    @@ -87,6 +87,11 @@ import java.util.regex.Pattern;
       * If we ever decide to run remote Derby server, according to
       * https://db.apache.org/derby/docs/10.0/manuals/develop/develop78.html all transactions will be
       * seriazlied, so that would also work though has not been tested.
    + *
    + * General design note:
    + * It's imperative that any operation on a txn (e.g. commit), ensure (atomically) that this txn is
    + * still valid and active. In the code this is usually achieved at the same time the txn record
    + * is locked for some operation.
       */
      @InterfaceAudience.Private
      @InterfaceStability.Evolving

    http://git-wip-us.apache.org/repos/asf/hive/blob/b70efa44/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
    index e8ebe55..3aec8eb 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
    @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.lockmgr;

      import com.google.common.annotations.VisibleForTesting;
      import org.apache.hadoop.conf.Configuration;
    +import org.apache.hadoop.hive.ql.io.AcidUtils;
      import org.apache.hive.common.util.ShutdownHookManager;
      import org.slf4j.Logger;
      import org.slf4j.LoggerFactory;
    @@ -213,6 +214,17 @@ public class DbTxnManager extends HiveTxnManagerImpl {
                break;

              case INSERT:
    + t = output.getTable();
    + if(t == null) {
    + throw new IllegalStateException("No table info for " + output);
    + }
    + if(AcidUtils.isAcidTable(t)) {
    + compBuilder.setShared();
    + }
    + else {
    + compBuilder.setExclusive();
    + }
    + break;
              case DDL_SHARED:
                compBuilder.setShared();
                break;

    http://git-wip-us.apache.org/repos/asf/hive/blob/b70efa44/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
    index 04c1d17..1030987 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
    @@ -433,6 +433,28 @@ public class TestTxnCommands2 {
        }

        /**
    + * Test update that hits multiple partitions (i.e. requries dynamic partition insert to process)
    + * @throws Exception
    + */
    + @Test
    + public void updateDeletePartitioned() throws Exception {
    + int[][] tableData = {{1,2},{3,4},{5,6}};
    + runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p=1) (a,b) " + makeValuesClause(tableData));
    + runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p=2) (a,b) " + makeValuesClause(tableData));
    + TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
    + txnHandler.compact(new CompactionRequest("default", Table.ACIDTBLPART.name(), CompactionType.MAJOR));
    + runWorker(hiveConf);
    + runCleaner(hiveConf);
    + runStatementOnDriver("update " + Table.ACIDTBLPART + " set b = b + 1 where a = 3");
    + txnHandler.compact(new CompactionRequest("default", Table.ACIDTBLPART.toString(), CompactionType.MAJOR));
    + runWorker(hiveConf);
    + runCleaner(hiveConf);
    + List<String> rs = runStatementOnDriver("select p,a,b from " + Table.ACIDTBLPART + " order by p, a, b");
    + int[][] expectedData = {{1,1,2},{1,3,5},{1,5,6},{2,1,2},{2,3,5},{2,5,6}};
    + Assert.assertEquals("Update " + Table.ACIDTBLPART + " didn't match:", stringifyValues(expectedData), rs);
    + }
    +
    + /**
         * https://issues.apache.org/jira/browse/HIVE-10151
         */
        @Test

    http://git-wip-us.apache.org/repos/asf/hive/blob/b70efa44/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
    index 6e2cf30..e94af55 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
    @@ -536,6 +536,87 @@ public class TestDbTxnManager2 {
          Assert.assertEquals(0, count);
        }

    + /**
    + * collection of queries where we ensure that we get the locks that are expected
    + * @throws Exception
    + */
    + @Test
    + public void checkExpectedLocks() throws Exception {
    + CommandProcessorResponse cpr = null;
    + cpr = driver.run("create table acidPart(a int, b int) partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
    + checkCmdOnDriver(cpr);
    + cpr = driver.run("create table nonAcidPart(a int, b int) partitioned by (p string) stored as orc");
    + checkCmdOnDriver(cpr);
    +
    + cpr = driver.compileAndRespond("insert into nonAcidPart partition(p) values(1,2,3)");
    + checkCmdOnDriver(cpr);
    + LockState lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false);
    + List<ShowLocksResponseElement> locks = getLocks();
    + Assert.assertEquals("Unexpected lock count", 2, locks.size());
    + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "values__tmp__table__1", null, locks.get(0));
    + checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", "nonAcidPart", null, locks.get(1));
    + List<HiveLock> relLocks = new ArrayList<HiveLock>(2);
    + relLocks.add(new DbLockManager.DbHiveLock(locks.get(0).getLockid()));
    + relLocks.add(new DbLockManager.DbHiveLock(locks.get(1).getLockid()));
    + txnMgr.getLockManager().releaseLocks(relLocks);
    +
    + cpr = driver.compileAndRespond("insert into nonAcidPart partition(p=1) values(5,6)");
    + checkCmdOnDriver(cpr);
    + lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false);
    + locks = getLocks();
    + Assert.assertEquals("Unexpected lock count", 2, locks.size());
    + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "values__tmp__table__2", null, locks.get(0));
    + checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", "nonAcidPart", "p=1", locks.get(1));
    + relLocks = new ArrayList<HiveLock>(2);
    + relLocks.add(new DbLockManager.DbHiveLock(locks.get(0).getLockid()));
    + relLocks.add(new DbLockManager.DbHiveLock(locks.get(1).getLockid()));
    + txnMgr.getLockManager().releaseLocks(relLocks);
    +
    + cpr = driver.compileAndRespond("insert into acidPart partition(p) values(1,2,3)");
    + checkCmdOnDriver(cpr);
    + lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false);
    + locks = getLocks();
    + Assert.assertEquals("Unexpected lock count", 2, locks.size());
    + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "values__tmp__table__3", null, locks.get(0));
    + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "acidPart", null, locks.get(1));
    + relLocks = new ArrayList<HiveLock>(2);
    + relLocks.add(new DbLockManager.DbHiveLock(locks.get(0).getLockid()));
    + relLocks.add(new DbLockManager.DbHiveLock(locks.get(1).getLockid()));
    + txnMgr.getLockManager().releaseLocks(relLocks);
    +
    + cpr = driver.compileAndRespond("insert into acidPart partition(p=1) values(5,6)");
    + checkCmdOnDriver(cpr);
    + lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false);
    + locks = getLocks();
    + Assert.assertEquals("Unexpected lock count", 2, locks.size());
    + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "values__tmp__table__4", null, locks.get(0));
    + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "acidPart", "p=1", locks.get(1));
    + relLocks = new ArrayList<HiveLock>(2);
    + relLocks.add(new DbLockManager.DbHiveLock(locks.get(0).getLockid()));
    + relLocks.add(new DbLockManager.DbHiveLock(locks.get(1).getLockid()));
    + txnMgr.getLockManager().releaseLocks(relLocks);
    +
    + cpr = driver.compileAndRespond("update acidPart set b = 17 where a = 1");
    + checkCmdOnDriver(cpr);
    + lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false);
    + locks = getLocks();
    + Assert.assertEquals("Unexpected lock count", 1, locks.size());
    + checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "acidPart", null, locks.get(0));
    + relLocks = new ArrayList<HiveLock>(2);
    + relLocks.add(new DbLockManager.DbHiveLock(locks.get(0).getLockid()));
    + txnMgr.getLockManager().releaseLocks(relLocks);
    +
    + cpr = driver.compileAndRespond("update acidPart set b = 17 where p = 1");
    + checkCmdOnDriver(cpr);
    + lockState = ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "Practical", false);
    + locks = getLocks();
    + Assert.assertEquals("Unexpected lock count", 1, locks.size());
    + checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "acidPart", null, locks.get(0));//https://issues.apache.org/jira/browse/HIVE-13212
    + relLocks = new ArrayList<HiveLock>(2);
    + relLocks.add(new DbLockManager.DbHiveLock(locks.get(0).getLockid()));
    + txnMgr.getLockManager().releaseLocks(relLocks);
    + }
    +
        private void checkLock(LockType type, LockState state, String db, String table, String partition, ShowLocksResponseElement l) {
          Assert.assertEquals(l.toString(),l.getType(), type);
          Assert.assertEquals(l.toString(),l.getState(), state);
  • Spena at May 6, 2016 at 8:42 pm
    HIVE-11848 - tables in subqueries don't get locked (Eugene Koifman, reviewed by Wei Zheng)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/47bf055c
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/47bf055c
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/47bf055c

    Branch: refs/heads/java8
    Commit: 47bf055c02990272753105b917b487c5bbfe9208
    Parents: 868e5e1
    Author: Eugene Koifman <ekoifman@hortonworks.com>
    Authored: Tue May 3 13:33:42 2016 -0700
    Committer: Eugene Koifman <ekoifman@hortonworks.com>
    Committed: Tue May 3 13:53:02 2016 -0700

    ----------------------------------------------------------------------
      .../ql/parse/UpdateDeleteSemanticAnalyzer.java | 16 +++++++++-
      .../hive/ql/lockmgr/TestDbTxnManager2.java | 33 ++++++++++++++++++++
      2 files changed, 48 insertions(+), 1 deletion(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/47bf055c/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
    index b8771d2..33fbffe 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
    @@ -329,7 +329,9 @@ public class UpdateDeleteSemanticAnalyzer extends SemanticAnalyzer {
          // Walk through all our inputs and set them to note that this read is part of an update or a
          // delete.
          for (ReadEntity input : inputs) {
    - input.setUpdateOrDelete(true);
    + if(isWritten(input)) {
    + input.setUpdateOrDelete(true);
    + }
          }

          if (inputIsPartitioned(inputs)) {
    @@ -377,6 +379,18 @@ public class UpdateDeleteSemanticAnalyzer extends SemanticAnalyzer {
          }
        }

    + /**
    + * Check that {@code readEntity} is also being written
    + */
    + private boolean isWritten(Entity readEntity) {
    + for(Entity writeEntity : outputs) {
    + //make sure to compare them as Entity, i.e. that it's the same table or partition, etc
    + if(writeEntity.toString().equalsIgnoreCase(readEntity.toString())) {
    + return true;
    + }
    + }
    + return false;
    + }
        private String operation() {
          if (updating()) return "update";
          else if (deleting()) return "delete";

    http://git-wip-us.apache.org/repos/asf/hive/blob/47bf055c/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
    index 836b507..6e2cf30 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
    @@ -71,6 +71,39 @@ public class TestDbTxnManager2 {
          TxnDbUtil.prepDb();
        }
        @Test
    + public void testLocksInSubquery() throws Exception {
    + checkCmdOnDriver(driver.run("create table if not exists T (a int, b int)"));
    + checkCmdOnDriver(driver.run("create table if not exists S (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"));
    + checkCmdOnDriver(driver.run("create table if not exists R (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"));
    +
    + checkCmdOnDriver(driver.compileAndRespond("delete from S where a in (select a from T where b = 1)"));
    + txnMgr.openTxn("one");
    + txnMgr.acquireLocks(driver.getPlan(), ctx, "one");
    + List<ShowLocksResponseElement> locks = getLocks();
    + Assert.assertEquals("Unexpected lock count", 2, locks.size());
    + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T", null, locks.get(0));
    + checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "S", null, locks.get(1));
    + txnMgr.rollbackTxn();
    +
    + checkCmdOnDriver(driver.compileAndRespond("update S set a = 7 where a in (select a from T where b = 1)"));
    + txnMgr.openTxn("one");
    + txnMgr.acquireLocks(driver.getPlan(), ctx, "one");
    + locks = getLocks();
    + Assert.assertEquals("Unexpected lock count", 2, locks.size());
    + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T", null, locks.get(0));
    + checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "S", null, locks.get(1));
    + txnMgr.rollbackTxn();
    +
    + checkCmdOnDriver(driver.compileAndRespond("insert into R select * from S where a in (select a from T where b = 1)"));
    + txnMgr.openTxn("three");
    + txnMgr.acquireLocks(driver.getPlan(), ctx, "three");
    + locks = getLocks();
    + Assert.assertEquals("Unexpected lock count", 3, locks.size());
    + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T", null, locks.get(0));
    + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "S", null, locks.get(1));
    + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "R", null, locks.get(2));
    + }
    + @Test
        public void createTable() throws Exception {
          CommandProcessorResponse cpr = driver.compileAndRespond("create table if not exists T (a int, b int)");
          checkCmdOnDriver(cpr);
  • Spena at May 6, 2016 at 8:42 pm
    HIVE-13516: Adding BTEQ .IF, .QUIT, ERRORCODE to HPL/SQL (Dmitry Tolpeko reviewed by Alan Gates)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2d33d091
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2d33d091
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2d33d091

    Branch: refs/heads/java8
    Commit: 2d33d091b61dce092543970e62f41b63af1f32d1
    Parents: 8729966
    Author: Dmitry Tolpeko <dmtolpeko@gmail.com>
    Authored: Wed May 4 03:13:18 2016 -0700
    Committer: Dmitry Tolpeko <dmtolpeko@gmail.com>
    Committed: Wed May 4 03:13:18 2016 -0700

    ----------------------------------------------------------------------
      .../antlr4/org/apache/hive/hplsql/Hplsql.g4 | 108 ++++++++++---
      .../main/java/org/apache/hive/hplsql/Exec.java | 67 +++++++-
      .../java/org/apache/hive/hplsql/Expression.java | 31 ++--
      .../java/org/apache/hive/hplsql/Select.java | 31 ++--
      .../java/org/apache/hive/hplsql/Signal.java | 2 +-
      .../main/java/org/apache/hive/hplsql/Stmt.java | 154 ++++++++++++-------
      hplsql/src/main/resources/hplsql-site.xml | 2 -
      .../org/apache/hive/hplsql/TestHplsqlLocal.java | 5 +
      .../apache/hive/hplsql/TestHplsqlOffline.java | 20 +++
      hplsql/src/test/queries/local/if3_bteq.sql | 3 +
      .../test/queries/offline/create_table_td.sql | 45 ++++++
      hplsql/src/test/queries/offline/delete_all.sql | 1 +
      hplsql/src/test/queries/offline/select.sql | 42 +++++
      .../test/queries/offline/select_teradata.sql | 12 ++
      hplsql/src/test/results/db/select_into.out.txt | 3 +-
      hplsql/src/test/results/db/select_into2.out.txt | 4 +-
      hplsql/src/test/results/local/if3_bteq.out.txt | 3 +
      hplsql/src/test/results/local/lang.out.txt | 10 +-
      .../results/offline/create_table_mssql.out.txt | 39 ++---
      .../results/offline/create_table_mssql2.out.txt | 13 +-
      .../results/offline/create_table_mysql.out.txt | 5 +-
      .../results/offline/create_table_ora.out.txt | 65 ++++----
      .../results/offline/create_table_ora2.out.txt | 9 +-
      .../results/offline/create_table_pg.out.txt | 7 +-
      .../results/offline/create_table_td.out.txt | 31 ++++
      .../src/test/results/offline/delete_all.out.txt | 2 +
      hplsql/src/test/results/offline/select.out.txt | 34 ++++
      .../src/test/results/offline/select_db2.out.txt | 3 +-
      .../results/offline/select_teradata.out.txt | 10 ++
      29 files changed, 589 insertions(+), 172 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4 b/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
    index b84116f..5ce0e23 100644
    --- a/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
    +++ b/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
    @@ -30,7 +30,7 @@ single_block_stmt : // Single BEGIN END blo
             T_BEGIN block exception_block? block_end
    stmt T_SEMICOLON?
           ;
    -
    +
      block_end :
             {!_input.LT(2).getText().equalsIgnoreCase("TRANSACTION")}? T_END
           ;
    @@ -48,6 +48,7 @@ stmt :
    begin_transaction_stmt
    break_stmt
    call_stmt
    + | collect_stats_stmt
    close_stmt
    cmp_stmt
    copy_from_ftp_stmt
    @@ -83,6 +84,7 @@ stmt :
    merge_stmt
    open_stmt
    print_stmt
    + | quit_stmt
    raise_stmt
    resignal_stmt
    return_stmt
    @@ -181,9 +183,9 @@ declare_block_inplace :

      declare_stmt_item :
             declare_cursor_item
    - | declare_var_item
    declare_condition_item
    declare_handler_item
    + | declare_var_item
    declare_temporary_table_item
           ;

    @@ -213,15 +215,19 @@ declare_handler_item : // Condition handler declaration
           ;

      declare_temporary_table_item : // DECLARE TEMPORARY TABLE statement
    - T_GLOBAL? T_TEMPORARY T_TABLE ident (T_AS? T_OPEN_P select_stmt T_CLOSE_P | T_AS? select_stmt | T_OPEN_P create_table_columns T_CLOSE_P) create_table_options?
    + T_GLOBAL? T_TEMPORARY T_TABLE ident create_table_preoptions? create_table_definition
           ;

      create_table_stmt :
    - T_CREATE T_TABLE (T_IF T_NOT T_EXISTS)? table_name T_OPEN_P create_table_columns T_CLOSE_P create_table_options?
    + T_CREATE T_TABLE (T_IF T_NOT T_EXISTS)? table_name create_table_preoptions? create_table_definition
           ;

      create_local_temp_table_stmt :
    - T_CREATE (T_LOCAL T_TEMPORARY | (T_SET | T_MULTISET)? T_VOLATILE) T_TABLE ident create_table_preoptions? T_OPEN_P create_table_columns T_CLOSE_P create_table_options?
    + T_CREATE (T_LOCAL T_TEMPORARY | (T_SET | T_MULTISET)? T_VOLATILE) T_TABLE ident create_table_preoptions? create_table_definition
    + ;
    +
    +create_table_definition :
    + (T_AS? T_OPEN_P select_stmt T_CLOSE_P | T_AS? select_stmt | T_OPEN_P create_table_columns T_CLOSE_P) create_table_options?
           ;

      create_table_columns :
    @@ -262,7 +268,7 @@ create_table_preoptions :
           ;

      create_table_preoptions_item :
    - T_NO? T_LOG
    + T_NO? (T_LOG | T_FALLBACK)
           ;

      create_table_options :
    @@ -273,6 +279,7 @@ create_table_options_item :
             T_ON T_COMMIT (T_DELETE | T_PRESERVE) T_ROWS
    create_table_options_ora_item
    create_table_options_db2_item
    + | create_table_options_td_item
    create_table_options_hive_item
    create_table_options_mssql_item
    create_table_options_mysql_item
    @@ -296,6 +303,11 @@ create_table_options_db2_item :
    T_DEFINITION T_ONLY
           ;

    +create_table_options_td_item :
    + T_UNIQUE? T_PRIMARY T_INDEX T_OPEN_P ident (T_COMMA ident)* T_CLOSE_P
    + | T_WITH T_DATA
    + ;
    +
      create_table_options_hive_item :
             create_table_hive_row_format
           ;
    @@ -379,7 +391,7 @@ dtype : // Data types
    T_VARCHAR
    T_VARCHAR2
    T_XML
    - | L_ID ('%' (T_TYPE | T_ROWTYPE))? // User-defined or derived data type
    + | ident ('%' (T_TYPE | T_ROWTYPE))? // User-defined or derived data type
           ;

      dtype_len : // Data type length or size specification
    @@ -450,7 +462,9 @@ create_routine_params :
             T_OPEN_P T_CLOSE_P
    T_OPEN_P create_routine_param_item (T_COMMA create_routine_param_item)* T_CLOSE_P
    {!_input.LT(1).getText().equalsIgnoreCase("IS") &&
    - !_input.LT(1).getText().equalsIgnoreCase("AS")}?
    + !_input.LT(1).getText().equalsIgnoreCase("AS") &&
    + !(_input.LT(1).getText().equalsIgnoreCase("DYNAMIC") && _input.LT(2).getText().equalsIgnoreCase("RESULT"))
    + }?
             create_routine_param_item (T_COMMA create_routine_param_item)*
           ;

    @@ -484,6 +498,7 @@ exec_stmt : // EXEC, EXECUTE IMMEDIATE statement
      if_stmt : // IF statement
             if_plsql_stmt
    if_tsql_stmt
    + | if_bteq_stmt
           ;

      if_plsql_stmt :
    @@ -494,13 +509,17 @@ if_tsql_stmt :
             T_IF bool_expr single_block_stmt (T_ELSE single_block_stmt)?
           ;

    +if_bteq_stmt :
    + '.' T_IF bool_expr T_THEN single_block_stmt
    + ;
    +
      elseif_block :
             (T_ELSIF | T_ELSEIF) bool_expr T_THEN block
           ;

      else_block :
             T_ELSE block
    - ;
    + ;

      include_stmt : // INCLUDE statement
             T_INCLUDE (file_name | expr)
    @@ -571,6 +590,14 @@ fetch_stmt : // FETCH cursor statement
             T_FETCH T_FROM? L_ID T_INTO L_ID (T_COMMA L_ID)*
           ;

    +collect_stats_stmt :
    + T_COLLECT (T_STATISTICS | T_STATS) T_ON table_name collect_stats_clause?
    + ;
    +
    +collect_stats_clause :
    + T_COLUMN T_OPEN_P ident (T_COMMA ident)* T_CLOSE_P
    + ;
    +
      close_stmt : // CLOSE cursor statement
             T_CLOSE L_ID
           ;
    @@ -652,6 +679,10 @@ print_stmt : // PRINT statement
             T_PRINT expr
    T_PRINT T_OPEN_P expr T_CLOSE_P
           ;
    +
    +quit_stmt :
    + '.'? T_QUIT expr?
    + ;

      raise_stmt :
             T_RAISE
    @@ -761,7 +792,7 @@ fullselect_set_clause :
           ;

      subselect_stmt :
    - (T_SELECT | T_SEL) select_list into_clause? from_clause? where_clause? group_by_clause? having_clause? order_by_clause? select_options?
    + (T_SELECT | T_SEL) select_list into_clause? from_clause? where_clause? group_by_clause? (having_clause | qualify_clause)? order_by_clause? select_options?
           ;

      select_list :
    @@ -834,6 +865,8 @@ from_table_values_row:
      from_alias_clause :
             {!_input.LT(1).getText().equalsIgnoreCase("EXEC") &&
              !_input.LT(1).getText().equalsIgnoreCase("EXECUTE") &&
    + !_input.LT(1).getText().equalsIgnoreCase("INNER") &&
    + !_input.LT(1).getText().equalsIgnoreCase("LEFT") &&
              !_input.LT(1).getText().equalsIgnoreCase("GROUP") &&
              !_input.LT(1).getText().equalsIgnoreCase("ORDER") &&
              !_input.LT(1).getText().equalsIgnoreCase("LIMIT") &&
    @@ -856,6 +889,10 @@ group_by_clause :
      having_clause :
             T_HAVING bool_expr
           ;
    +
    +qualify_clause :
    + T_QUALIFY bool_expr
    + ;

      order_by_clause :
             T_ORDER T_BY expr (T_ASC | T_DESC)? (T_COMMA expr (T_ASC | T_DESC)?)*
    @@ -879,7 +916,7 @@ update_assignment :
           ;

      update_table :
    - (table_name | (T_OPEN_P select_stmt T_CLOSE_P)) (T_AS? ident)?
    + (table_name from_clause? | T_OPEN_P select_stmt T_CLOSE_P) (T_AS? ident)?
           ;

      update_upsert :
    @@ -905,9 +942,14 @@ merge_action :
    T_DELETE
           ;

    -delete_stmt : // DELETE statement
    - T_DELETE T_FROM? table_name (T_AS? ident)? where_clause?
    +delete_stmt :
    + T_DELETE T_FROM? table_name delete_alias? (where_clause | T_ALL)?
           ;
    +
    +delete_alias :
    + {!_input.LT(1).getText().equalsIgnoreCase("ALL")}?
    + T_AS? ident
    + ;

      describe_stmt :
             (T_DESCRIBE | T_DESC) T_TABLE? table_name
    @@ -928,6 +970,7 @@ bool_expr_atom :
      bool_expr_unary :
            expr T_IS T_NOT? T_NULL
    expr T_BETWEEN expr T_AND expr
    + | T_NOT? T_EXISTS T_OPEN_P select_stmt T_CLOSE_P
    bool_expr_single_in
    bool_expr_multi_in
          ;
    @@ -967,6 +1010,7 @@ expr :
    expr T_DIV expr
    expr T_ADD expr
    expr T_SUB expr
    + | T_OPEN_P select_stmt T_CLOSE_P
    T_OPEN_P expr T_CLOSE_P
    expr_interval
    expr_concat
    @@ -997,6 +1041,8 @@ interval_item :
    T_DAYS
    T_MICROSECOND
    T_MICROSECONDS
    + | T_SECOND
    + | T_SECONDS
           ;

      expr_concat : // String concatenation operator
    @@ -1141,8 +1187,7 @@ timestamp_literal : // TIMESTAMP 'YYYY-MM-DD HH:MI:SS.FFF'
           ;

      ident :
    - L_ID
    - | non_reserved_words
    + (L_ID | non_reserved_words) ('.' (L_ID | non_reserved_words))*
           ;

      string : // String literal (single or double quoted)
    @@ -1207,7 +1252,9 @@ non_reserved_words : // Tokens that are not reserved words
    T_CLOSE
    T_CLUSTERED
    T_CMP
    + | T_COLLECT
    T_COLLECTION
    + | T_COLUMN
    T_COMMENT
    T_CONSTANT
    T_COPY
    @@ -1229,6 +1276,7 @@ non_reserved_words : // Tokens that are not reserved words
    T_CURRENT_TIMESTAMP
    T_CURRENT_USER
    T_CURSOR
    + | T_DATA
    T_DATABASE
    T_DATE
    T_DATETIME
    @@ -1270,12 +1318,13 @@ non_reserved_words : // Tokens that are not reserved words
    T_EXCEPTION
    T_EXCLUSIVE
    T_EXISTS
    - | T_EXIT
    + | T_EXIT
    + | T_FALLBACK
    T_FALSE
    T_FETCH
    T_FIELDS
    T_FILE
    - | T_FILES
    + | T_FILES
    T_FIRST_VALUE
    T_FLOAT
    T_FOR
    @@ -1390,7 +1439,9 @@ non_reserved_words : // Tokens that are not reserved words
    T_PROC
    T_PROCEDURE
    T_PWD
    + | T_QUALIFY
    T_QUERY_BAND
    + | T_QUIT
    T_QUOTED_IDENTIFIER
    T_RAISE
    T_RANK
    @@ -1416,6 +1467,8 @@ non_reserved_words : // Tokens that are not reserved words
    T_ROW_COUNT
    T_ROW_NUMBER
    T_SCHEMA
    + | T_SECOND
    + | T_SECONDS
    T_SECURITY
    T_SEGMENT
    T_SEL
    @@ -1434,7 +1487,9 @@ non_reserved_words : // Tokens that are not reserved words
    T_SQLEXCEPTION
    T_SQLINSERT
    T_SQLSTATE
    - | T_SQLWARNING
    + | T_SQLWARNING
    + | T_STATS
    + | T_STATISTICS
    T_STEP
    T_STDEV
    T_STORAGE
    @@ -1523,7 +1578,9 @@ T_CLIENT : C L I E N T ;
      T_CLOSE : C L O S E ;
      T_CLUSTERED : C L U S T E R E D;
      T_CMP : C M P ;
    +T_COLLECT : C O L L E C T ;
      T_COLLECTION : C O L L E C T I O N ;
    +T_COLUMN : C O L U M N ;
      T_COMMENT : C O M M E N T;
      T_CONSTANT : C O N S T A N T ;
      T_COMMIT : C O M M I T ;
    @@ -1541,7 +1598,8 @@ T_CS : C S;
      T_CURRENT : C U R R E N T ;
      T_CURRENT_SCHEMA : C U R R E N T '_' S C H E M A ;
      T_CURSOR : C U R S O R ;
    -T_DATABASE : D A T A B A S E;
    +T_DATABASE : D A T A B A S E ;
    +T_DATA : D A T A ;
      T_DATE : D A T E ;
      T_DATETIME : D A T E T I M E ;
      T_DAY : D A Y ;
    @@ -1582,6 +1640,7 @@ T_EXCEPTION : E X C E P T I O N ;
      T_EXCLUSIVE : E X C L U S I V E ;
      T_EXISTS : E X I S T S ;
      T_EXIT : E X I T ;
    +T_FALLBACK : F A L L B A C K ;
      T_FALSE : F A L S E ;
      T_FETCH : F E T C H ;
      T_FIELDS : F I E L D S ;
    @@ -1694,8 +1753,10 @@ T_PRESERVE : P R E S E R V E ;
      T_PRIMARY : P R I M A R Y ;
      T_PRINT : P R I N T ;
      T_PROC : P R O C ;
    -T_PROCEDURE : P R O C E D U R E;
    +T_PROCEDURE : P R O C E D U R E ;
    +T_QUALIFY : Q U A L I F Y ;
      T_QUERY_BAND : Q U E R Y '_' B A N D ;
    +T_QUIT : Q U I T ;
      T_QUOTED_IDENTIFIER : Q U O T E D '_' I D E N T I F I E R ;
      T_RAISE : R A I S E ;
      T_REAL : R E A L ;
    @@ -1722,6 +1783,8 @@ T_RS : R S ;
      T_PWD : P W D ;
      T_TRIM : T R I M ;
      T_SCHEMA : S C H E M A ;
    +T_SECOND : S E C O N D ;
    +T_SECONDS : S E C O N D S;
      T_SECURITY : S E C U R I T Y ;
      T_SEGMENT : S E G M E N T ;
      T_SEL : S E L ;
    @@ -1742,6 +1805,8 @@ T_SQLEXCEPTION : S Q L E X C E P T I O N ;
      T_SQLINSERT : S Q L I N S E R T ;
      T_SQLSTATE : S Q L S T A T E ;
      T_SQLWARNING : S Q L W A R N I N G ;
    +T_STATS : S T A T S ;
    +T_STATISTICS : S T A T I S T I C S ;
      T_STEP : S T E P ;
      T_STORAGE : S T O R A G E ;
      T_STRING : S T R I N G ;
    @@ -1836,7 +1901,7 @@ T_CLOSE_SB : ']' ;
      T_SEMICOLON : ';' ;
      T_SUB : '-' ;

    -L_ID : L_ID_PART (L_BLANK* '.' L_BLANK* L_ID_PART)* // Identifier
    +L_ID : L_ID_PART // Identifier
                  ;
      L_S_STRING : '\'' (('\'' '\'') | ('\\' '\'') | ~('\''))* '\'' // Single quoted string literal
                  ;
    @@ -1859,6 +1924,7 @@ L_LABEL : ([a-zA-Z] | L_DIGIT | '_')* ':'
      fragment
      L_ID_PART :
                   [a-zA-Z] ([a-zA-Z] | L_DIGIT | '_')* // Identifier part
    + | '$' '{' .*? '}'
    ('_' | '@' | ':' | '#' | '$') ([a-zA-Z] | L_DIGIT | '_' | '@' | ':' | '#' | '$')+ // (at least one char must follow special char)
    '"' .*? '"' // Quoted identifiers
    '[' .*? ']'
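
    For orientation, the statements below sketch the Teradata/BTEQ-style constructs that the grammar
    rules added above (collect_stats_stmt, qualify_clause, if_bteq_stmt, quit_stmt) are meant to
    accept. This is an illustrative sketch only: the table and column names are invented, and the
    authoritative forms are the grammar itself plus the test queries added later in this commit.

        -- illustrative HPL/SQL input, not part of the commit
        COLLECT STATISTICS ON sales_tab COLUMN (store_id, sale_dt);

        SELECT store_id, sale_dt
        FROM sales_tab
        QUALIFY ROW_NUMBER() OVER (PARTITION BY store_id ORDER BY sale_dt) = 1;

        .IF ERRORCODE = 0 THEN PRINT 'load ok'
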
    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
    index 02605a8..67cf2ae 100644
    --- a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
    +++ b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
    @@ -40,6 +40,7 @@ import org.antlr.v4.runtime.ParserRuleContext;
      import org.antlr.v4.runtime.Token;
      import org.antlr.v4.runtime.misc.NotNull;
      import org.antlr.v4.runtime.tree.ParseTree;
    +import org.antlr.v4.runtime.tree.TerminalNode;
      import org.apache.commons.io.FileUtils;
      import org.apache.hive.hplsql.Var.Type;
      import org.apache.hive.hplsql.functions.*;
    @@ -50,7 +51,8 @@ import org.apache.hive.hplsql.functions.*;
       */
      public class Exec extends HplsqlBaseVisitor<Integer> {

    - public static final String VERSION = "HPL/SQL 0.3.17";
    + public static final String VERSION = "HPL/SQL 0.3.31";
    + public static final String ERRORCODE = "ERRORCODE";
        public static final String SQLCODE = "SQLCODE";
        public static final String SQLSTATE = "SQLSTATE";
        public static final String HOSTCODE = "HOSTCODE";
    @@ -665,9 +667,14 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
         * Set SQLCODE
         */
        public void setSqlCode(int sqlcode) {
    + Long code = new Long(sqlcode);
          Var var = findVariable(SQLCODE);
          if (var != null) {
    - var.setValue(new Long(sqlcode));
    + var.setValue(code);
    + }
    + var = findVariable(ERRORCODE);
    + if (var != null) {
    + var.setValue(code);
          }
        }

    @@ -783,6 +790,7 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
          new FunctionMisc(this).register(function);
          new FunctionString(this).register(function);
          new FunctionOra(this).register(function);
    + addVariable(new Var(ERRORCODE, Var.Type.BIGINT, 0L));
          addVariable(new Var(SQLCODE, Var.Type.BIGINT, 0L));
          addVariable(new Var(SQLSTATE, Var.Type.STRING, "00000"));
          addVariable(new Var(HOSTCODE, Var.Type.BIGINT, 0L));
    @@ -942,9 +950,10 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
         */
        Integer getProgramReturnCode() {
          Integer rc = 0;
    - if(!signals.empty()) {
    + if (!signals.empty()) {
            Signal sig = signals.pop();
    - if(sig.type == Signal.Type.LEAVE_ROUTINE && sig.value != null) {
    + if ((sig.type == Signal.Type.LEAVE_PROGRAM || sig.type == Signal.Type.LEAVE_ROUTINE) &&
    + sig.value != null) {
              try {
                rc = Integer.parseInt(sig.value);
              }
    @@ -1133,7 +1142,7 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
          String scale = null;
          Var default_ = null;
          if (ctx.dtype().T_ROWTYPE() != null) {
    - row = meta.getRowDataType(ctx, exec.conf.defaultConnection, ctx.dtype().L_ID().getText());
    + row = meta.getRowDataType(ctx, exec.conf.defaultConnection, ctx.dtype().ident().getText());
            if (row == null) {
              type = Var.DERIVED_ROWTYPE;
            }
    @@ -1184,7 +1193,7 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
        String getDataType(HplsqlParser.Declare_var_itemContext ctx) {
          String type = null;
          if (ctx.dtype().T_TYPE() != null) {
    - type = meta.getDataType(ctx, exec.conf.defaultConnection, ctx.dtype().L_ID().getText());
    + type = meta.getDataType(ctx, exec.conf.defaultConnection, ctx.dtype().ident().getText());
            if (type == null) {
              type = Var.DERIVED_TYPE;
            }
    @@ -1349,6 +1358,11 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
        }

        @Override
    + public Integer visitCreate_table_options_td_item(HplsqlParser.Create_table_options_td_itemContext ctx) {
    + return 0;
    + }
    +
    + @Override
        public Integer visitCreate_table_options_mssql_item(HplsqlParser.Create_table_options_mssql_itemContext ctx) {
          return 0;
        }
    @@ -1678,6 +1692,14 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
        }

        /**
    + * IF statement (BTEQ syntax)
    + */
    + @Override
    + public Integer visitIf_bteq_stmt(HplsqlParser.If_bteq_stmtContext ctx) {
    + return exec.stmt.ifBteq(ctx);
    + }
    +
    + /**
         * USE statement
         */
        @Override
    @@ -1786,6 +1808,14 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
         return exec.stmt.print(ctx);
        }

    + /**
    + * QUIT statement
    + */
    + @Override
    + public Integer visitQuit_stmt(HplsqlParser.Quit_stmtContext ctx) {
    + return exec.stmt.quit(ctx);
    + }
    +
        /**
         * SIGNAL statement
         */
    @@ -2290,6 +2320,31 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
        }

        /**
    + * Append the text preserving the formatting (space symbols) between tokens
    + */
    + void append(StringBuilder str, String appendStr, Token start, Token stop) {
    + String spaces = start.getInputStream().getText(new org.antlr.v4.runtime.misc.Interval(start.getStartIndex(), stop.getStopIndex()));
    + spaces = spaces.substring(start.getText().length(), spaces.length() - stop.getText().length());
    + str.append(spaces);
    + str.append(appendStr);
    + }
    +
    + void append(StringBuilder str, TerminalNode start, TerminalNode stop) {
    + String text = start.getSymbol().getInputStream().getText(new org.antlr.v4.runtime.misc.Interval(start.getSymbol().getStartIndex(), stop.getSymbol().getStopIndex()));
    + str.append(text);
    + }
    +
    + /**
    + * Get the first non-null node
    + */
    + TerminalNode nvl(TerminalNode t1, TerminalNode t2) {
    + if (t1 != null) {
    + return t1;
    + }
    + return t2;
    + }
    +
    + /**
         * Evaluate the expression and pop value from the stack
         */
        Var evalPop(ParserRuleContext ctx) {

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java b/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java
    index 33ef490..c10f702 100644
    --- a/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java
    +++ b/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java
    @@ -74,8 +74,14 @@ public class Expression {
          StringBuilder sql = new StringBuilder();
          if (ctx.T_OPEN_P() != null) {
            sql.append("(");
    - sql.append(evalPop(ctx.expr(0)).toString());
    - sql.append(")");
    + if (ctx.select_stmt() != null) {
    + exec.append(sql, evalPop(ctx.select_stmt()).toString(), ctx.T_OPEN_P().getSymbol(), ctx.select_stmt().getStart());
    + exec.append(sql, ctx.T_CLOSE_P().getText(), ctx.select_stmt().stop, ctx.T_CLOSE_P().getSymbol());
    + }
    + else {
    + sql.append(evalPop(ctx.expr(0)).toString());
    + sql.append(")");
    + }
          }
          else if (ctx.T_MUL() != null) {
            sql.append(evalPop(ctx.expr(0)).toString());
    @@ -232,6 +238,11 @@ public class Expression {
            sql.append(" " + ctx.T_AND().getText() + " ");
            sql.append(evalPop(ctx.expr(2)).toString());
          }
    + else if (ctx.T_EXISTS() != null) {
    + exec.append(sql, exec.nvl(ctx.T_NOT(), ctx.T_EXISTS()), ctx.T_OPEN_P());
    + exec.append(sql, evalPop(ctx.select_stmt()).toString(), ctx.T_OPEN_P().getSymbol(), ctx.select_stmt().getStart());
    + exec.append(sql, ctx.T_CLOSE_P().getText(), ctx.select_stmt().stop, ctx.T_CLOSE_P().getSymbol());
    + }
          else if (ctx.bool_expr_single_in() != null) {
            singleInClauseSql(ctx.bool_expr_single_in(), sql);
          }
    @@ -245,14 +256,12 @@ public class Expression {
        /**
         * Single value IN clause in executable SQL statement
         */
    - public void singleInClauseSql(HplsqlParser.Bool_expr_single_inContext ctx, StringBuilder sql) {
    - sql.append(evalPop(ctx.expr(0)).toString());
    - if (ctx.T_NOT() != null) {
    - sql.append(" " + ctx.T_NOT().getText());
    - }
    - sql.append(" " + ctx.T_IN().getText() + " (");
    + public void singleInClauseSql(HplsqlParser.Bool_expr_single_inContext ctx, StringBuilder sql) {
    + sql.append(evalPop(ctx.expr(0)).toString() + " ");
    + exec.append(sql, exec.nvl(ctx.T_NOT(), ctx.T_IN()), ctx.T_OPEN_P());
          if (ctx.select_stmt() != null) {
    - sql.append(evalPop(ctx.select_stmt()));
    + exec.append(sql, evalPop(ctx.select_stmt()).toString(), ctx.T_OPEN_P().getSymbol(), ctx.select_stmt().getStart());
    + exec.append(sql, ctx.T_CLOSE_P().getText(), ctx.select_stmt().stop, ctx.T_CLOSE_P().getSymbol());
          }
          else {
            int cnt = ctx.expr().size();
    @@ -262,8 +271,8 @@ public class Expression {
                sql.append(", ");
              }
            }
    - }
    - sql.append(")");
    + sql.append(")");
    + }
        }

        /**

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/java/org/apache/hive/hplsql/Select.java
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Select.java b/hplsql/src/main/java/org/apache/hive/hplsql/Select.java
    index 4bee252..589e984 100644
    --- a/hplsql/src/main/java/org/apache/hive/hplsql/Select.java
    +++ b/hplsql/src/main/java/org/apache/hive/hplsql/Select.java
    @@ -25,6 +25,7 @@ import java.util.List;
      import java.util.Stack;

      import org.antlr.v4.runtime.ParserRuleContext;
    +import org.antlr.v4.runtime.Token;
      import org.antlr.v4.runtime.misc.Interval;

      public class Select {
    @@ -196,26 +197,38 @@ public class Select {

        public Integer subselect(HplsqlParser.Subselect_stmtContext ctx) {
          StringBuilder sql = new StringBuilder();
    - if (ctx.T_SELECT() != null) {
    - sql.append(ctx.T_SELECT().getText());
    + sql.append(ctx.start.getText());
    + exec.append(sql, evalPop(ctx.select_list()).toString(), ctx.start, ctx.select_list().getStart());
    + Token last = ctx.select_list().stop;
    + if (ctx.into_clause() != null) {
    + last = ctx.into_clause().stop;
          }
    - sql.append(" " + evalPop(ctx.select_list()));
          if (ctx.from_clause() != null) {
    - sql.append(" " + evalPop(ctx.from_clause()));
    - } else if (conf.dualTable != null) {
    + exec.append(sql, evalPop(ctx.from_clause()).toString(), last, ctx.from_clause().getStart());
    + last = ctx.from_clause().stop;
    + }
    + else if (conf.dualTable != null) {
            sql.append(" FROM " + conf.dualTable);
          }
          if (ctx.where_clause() != null) {
    - sql.append(" " + evalPop(ctx.where_clause()));
    + exec.append(sql, evalPop(ctx.where_clause()).toString(), last, ctx.where_clause().getStart());
    + last = ctx.where_clause().stop;
          }
          if (ctx.group_by_clause() != null) {
    - sql.append(" " + getText(ctx.group_by_clause()));
    + exec.append(sql, getText(ctx.group_by_clause()), last, ctx.group_by_clause().getStart());
    + last = ctx.group_by_clause().stop;
          }
          if (ctx.having_clause() != null) {
    - sql.append(" " + getText(ctx.having_clause()));
    + exec.append(sql, getText(ctx.having_clause()), last, ctx.having_clause().getStart());
    + last = ctx.having_clause().stop;
    + }
    + if (ctx.qualify_clause() != null) {
    + exec.append(sql, getText(ctx.qualify_clause()), last, ctx.qualify_clause().getStart());
    + last = ctx.qualify_clause().stop;
          }
          if (ctx.order_by_clause() != null) {
    - sql.append(" " + getText(ctx.order_by_clause()));
    + exec.append(sql, getText(ctx.order_by_clause()), last, ctx.order_by_clause().getStart());
    + last = ctx.order_by_clause().stop;
          }
          if (ctx.select_options() != null) {
            Var opt = evalPop(ctx.select_options());

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/java/org/apache/hive/hplsql/Signal.java
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Signal.java b/hplsql/src/main/java/org/apache/hive/hplsql/Signal.java
    index 2c8cfc1..ddefcd8 100644
    --- a/hplsql/src/main/java/org/apache/hive/hplsql/Signal.java
    +++ b/hplsql/src/main/java/org/apache/hive/hplsql/Signal.java
    @@ -22,7 +22,7 @@ package org.apache.hive.hplsql;
       * Signals and exceptions
       */
      public class Signal {
    - public enum Type { LEAVE_LOOP, LEAVE_ROUTINE, SQLEXCEPTION, NOTFOUND, UNSUPPORTED_OPERATION, USERDEFINED };
    + public enum Type { LEAVE_LOOP, LEAVE_ROUTINE, LEAVE_PROGRAM, SQLEXCEPTION, NOTFOUND, UNSUPPORTED_OPERATION, USERDEFINED };
        Type type;
        String value = "";
        Exception exception = null;

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java b/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java
    index d35f994..17d2195 100644
    --- a/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java
    +++ b/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java
    @@ -25,9 +25,8 @@ import java.util.Stack;
      import java.util.UUID;

      import org.antlr.v4.runtime.ParserRuleContext;
    +import org.antlr.v4.runtime.Token;
      import org.apache.hive.hplsql.Var.Type;
    -import org.apache.hive.hplsql.HplsqlParser.Create_table_columns_itemContext;
    -import org.apache.hive.hplsql.HplsqlParser.Create_table_columnsContext;

      /**
       * HPL/SQL statements execution
    @@ -130,30 +129,13 @@ public class Stmt {
        public Integer createTable(HplsqlParser.Create_table_stmtContext ctx) {
          trace(ctx, "CREATE TABLE");
          StringBuilder sql = new StringBuilder();
    - sql.append(exec.getText(ctx, ctx.T_CREATE().getSymbol(), ctx.T_TABLE().getSymbol()));
    - sql.append(" " + evalPop(ctx.table_name()) + " (");
    - int cnt = ctx.create_table_columns().create_table_columns_item().size();
    - int cols = 0;
    - for (int i = 0; i < cnt; i++) {
    - Create_table_columns_itemContext col = ctx.create_table_columns().create_table_columns_item(i);
    - if (col.create_table_column_cons() != null) {
    - continue;
    - }
    - if (cols > 0) {
    - sql.append(",\n");
    - }
    - sql.append(evalPop(col.column_name()));
    - sql.append(" ");
    - sql.append(exec.evalPop(col.dtype(), col.dtype_len()));
    - cols++;
    - }
    - sql.append("\n)");
    - if (ctx.create_table_options() != null) {
    - String opt = evalPop(ctx.create_table_options()).toString();
    - if (opt != null) {
    - sql.append(" " + opt);
    - }
    + exec.append(sql, ctx.T_CREATE(), ctx.T_TABLE());
    + exec.append(sql, evalPop(ctx.table_name()).toString(), ctx.T_TABLE().getSymbol(), ctx.table_name().getStart());
    + Token last = ctx.table_name().getStop();
    + if (ctx.create_table_preoptions() != null) {
    + last = ctx.create_table_preoptions().stop;
          }
    + sql.append(createTableDefinition(ctx.create_table_definition(), last));
          trace(ctx, sql.toString());
          Query query = exec.executeSql(ctx, sql.toString(), exec.conf.defaultConnection);
          if (query.error()) {
    @@ -166,6 +148,40 @@ public class Stmt {
        }

        /**
    + * Get CREATE TABLE definition (columns or query)
    + */
    + String createTableDefinition(HplsqlParser.Create_table_definitionContext ctx, Token last) {
    + StringBuilder sql = new StringBuilder();
    + HplsqlParser.Create_table_columnsContext colCtx = ctx.create_table_columns();
    + if (colCtx != null) {
    + int cnt = colCtx.create_table_columns_item().size();
    + for (int i = 0; i < cnt; i++) {
    + HplsqlParser.Create_table_columns_itemContext col = colCtx.create_table_columns_item(i);
    + if (col.create_table_column_cons() != null) {
    + last = col.getStop();
    + continue;
    + }
    + exec.append(sql, evalPop(col.column_name()).toString(), last, col.column_name().getStop());
    + exec.append(sql, exec.evalPop(col.dtype(), col.dtype_len()), col.column_name().getStop(), col.dtype().getStart());
    + last = col.getStop();
    + }
    + exec.append(sql, ctx.T_CLOSE_P().getText(), last, ctx.T_CLOSE_P().getSymbol());
    + }
    + else {
    + exec.append(sql, evalPop(ctx.select_stmt()).toString(), last, ctx.select_stmt().getStart());
    + exec.append(sql, ctx.T_CLOSE_P().getText(), ctx.select_stmt().stop, ctx.T_CLOSE_P().getSymbol());
    + }
    + HplsqlParser.Create_table_optionsContext options = ctx.create_table_options();
    + if (options != null) {
    + String opt = evalPop(options).toString();
    + if (opt != null) {
    + sql.append(" " + opt);
    + }
    + }
    + return sql.toString();
    + }
    +
    + /**
         * CREATE TABLE options for Hive
         */
        public Integer createTableHiveOptions(HplsqlParser.Create_table_options_hive_itemContext ctx) {
    @@ -196,7 +212,6 @@ public class Stmt {
         * CREATE TABLE options for MySQL
         */
        public Integer createTableMysqlOptions(HplsqlParser.Create_table_options_mysql_itemContext ctx) {
    - StringBuilder sql = new StringBuilder();
          if (ctx.T_COMMENT() != null) {
            evalString(ctx.T_COMMENT().getText() + " " + evalPop(ctx.expr()).toSqlString());
          }
    @@ -207,11 +222,8 @@ public class Stmt {
         * DECLARE TEMPORARY TABLE statement
         */
        public Integer declareTemporaryTable(HplsqlParser.Declare_temporary_table_itemContext ctx) {
    - String name = ctx.ident().getText();
    - if (trace) {
    - trace(ctx, "DECLARE TEMPORARY TABLE " + name);
    - }
    - return createTemporaryTable(ctx, ctx.create_table_columns(), name);
    + trace(ctx, "DECLARE TEMPORARY TABLE");
    + return createTemporaryTable(ctx.ident(), ctx.create_table_definition(), ctx.create_table_preoptions());
        }

        /**
    @@ -256,37 +268,45 @@ public class Stmt {
         * CREATE LOCAL TEMPORARY | VOLATILE TABLE statement
         */
        public Integer createLocalTemporaryTable(HplsqlParser.Create_local_temp_table_stmtContext ctx) {
    - String name = ctx.ident().getText();
    - if (trace) {
    - trace(ctx, "CREATE LOCAL TEMPORARY TABLE " + name);
    - }
    - return createTemporaryTable(ctx, ctx.create_table_columns(), name);
    + trace(ctx, "CREATE LOCAL TEMPORARY TABLE");
    + return createTemporaryTable(ctx.ident(), ctx.create_table_definition(), ctx.create_table_preoptions());
         }

        /**
         * Create a temporary table statement
         */
    - public Integer createTemporaryTable(ParserRuleContext ctx, Create_table_columnsContext colCtx, String name) {
    + public Integer createTemporaryTable(HplsqlParser.IdentContext identCtx, HplsqlParser.Create_table_definitionContext defCtx,
    + HplsqlParser.Create_table_preoptionsContext optCtx) {
    + StringBuilder sql = new StringBuilder();
    + String name = identCtx.getText();
          String managedName = null;
    - String sql = null;
    - String columns = exec.getFormattedText(colCtx);
    + Token last = identCtx.getStop();
    + if (optCtx != null) {
    + last = optCtx.stop;
    + }
          if (conf.tempTables == Conf.TempTables.NATIVE) {
    - sql = "CREATE TEMPORARY TABLE " + name + "\n(" + columns + "\n)";
    - } else if (conf.tempTables == Conf.TempTables.MANAGED) {
    + sql.append("CREATE TEMPORARY TABLE " + name);
    + sql.append(createTableDefinition(defCtx, last));
    + }
    + else if (conf.tempTables == Conf.TempTables.MANAGED) {
            managedName = name + "_" + UUID.randomUUID().toString().replace("-","");
            if (!conf.tempTablesSchema.isEmpty()) {
              managedName = conf.tempTablesSchema + "." + managedName;
            }
    - sql = "CREATE TABLE " + managedName + "\n(" + columns + "\n)";
    + sql.append("CREATE TABLE " + managedName);
    + sql.append(createTableDefinition(defCtx, last));
            if (!conf.tempTablesLocation.isEmpty()) {
    - sql += "\nLOCATION '" + conf.tempTablesLocation + "/" + managedName + "'";
    + sql.append("\nLOCATION '" + conf.tempTablesLocation + "/" + managedName + "'");
            }
            if (trace) {
    - trace(ctx, "Managed table name: " + managedName);
    + trace(null, "Managed table name: " + managedName);
            }
          }
    + if (trace) {
    + trace(null, sql.toString());
    + }
          if (sql != null) {
    - Query query = exec.executeSql(ctx, sql, exec.conf.defaultConnection);
    + Query query = exec.executeSql(null, sql.toString(), exec.conf.defaultConnection);
            if (query.error()) {
              exec.signal(query);
              return 1;
    @@ -606,6 +626,19 @@ public class Stmt {
        }

        /**
    + * IF statement (BTEQ syntax)
    + */
    + public Integer ifBteq(HplsqlParser.If_bteq_stmtContext ctx) {
    + trace(ctx, "IF");
    + visit(ctx.bool_expr());
    + if (exec.stackPop().isTrue()) {
    + trace(ctx, "IF TRUE executed");
    + visit(ctx.single_block_stmt());
    + }
    + return 0;
    + }
    +
    + /**
         * Assignment from SELECT statement
         */
        public Integer assignFromSelect(HplsqlParser.Assignment_stmt_select_itemContext ctx) {
    @@ -1103,13 +1136,17 @@ public class Stmt {
          trace(ctx, "DELETE");
          String table = evalPop(ctx.table_name()).toString();
          StringBuilder sql = new StringBuilder();
    - sql.append("DELETE FROM ");
    - sql.append(table);
    - if (ctx.where_clause() != null) {
    - boolean oldBuildSql = exec.buildSql;
    - exec.buildSql = true;
    - sql.append(" " + evalPop(ctx.where_clause()).toString());
    - exec.buildSql = oldBuildSql;
    + if (ctx.T_ALL() == null) {
    + sql.append("DELETE FROM " + table);
    + if (ctx.where_clause() != null) {
    + boolean oldBuildSql = exec.buildSql;
    + exec.buildSql = true;
    + sql.append(" " + evalPop(ctx.where_clause()).toString());
    + exec.buildSql = oldBuildSql;
    + }
    + }
    + else {
    + sql.append("TRUNCATE TABLE " + table);
          }
          trace(ctx, sql.toString());
          Query query = exec.executeSql(ctx, sql.toString(), exec.conf.defaultConnection);
    @@ -1150,6 +1187,19 @@ public class Stmt {
         return 0;
        }

    + /**
    + * QUIT Statement
    + */
    + public Integer quit(HplsqlParser.Quit_stmtContext ctx) {
    + trace(ctx, "QUIT");
    + String rc = null;
    + if (ctx.expr() != null) {
    + rc = evalPop(ctx.expr()).toString();
    + }
    + exec.signal(Signal.Type.LEAVE_PROGRAM, rc);
    + return 0;
    + }
    +
        /**
         * SET current schema
         */
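
    As a usage sketch of the new statement handlers above (object names invented, illustrative only):
    DELETE ... ALL is rewritten to a Hive TRUNCATE TABLE statement, and QUIT raises a LEAVE_PROGRAM
    signal whose optional expression value becomes the program return code reported by
    Exec.getProgramReturnCode().

        -- illustrative HPL/SQL script, not part of the commit
        DELETE FROM work_db.stage_tab ALL;   -- executed as: TRUNCATE TABLE work_db.stage_tab
        .QUIT 1                              -- LEAVE_PROGRAM signal; return code 1
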

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/resources/hplsql-site.xml
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/main/resources/hplsql-site.xml b/hplsql/src/main/resources/hplsql-site.xml
    index 7e2d92d..05fe857 100644
    --- a/hplsql/src/main/resources/hplsql-site.xml
    +++ b/hplsql/src/main/resources/hplsql-site.xml
    @@ -12,7 +12,6 @@
      <property>
        <name>hplsql.conn.init.hiveconn</name>
        <value>
    - set mapred.job.queue.name=default;
           set hive.execution.engine=mr;
           use default;
        </value>
    @@ -36,7 +35,6 @@
      <property>
        <name>hplsql.conn.init.hive2conn</name>
        <value>
    - set mapred.job.queue.name=default;
           set hive.execution.engine=mr;
           use default;
        </value>

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
    index 80915ea..9b5a956 100644
    --- a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
    +++ b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
    @@ -222,6 +222,11 @@ public class TestHplsqlLocal {
        public void testIf2() throws Exception {
          run("if2");
        }
    +
    + @Test
    + public void testIf3Bteq() throws Exception {
    + run("if3_bteq");
    + }

        @Test
        public void testInclude() throws Exception {

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java
    index 59b7bff..3e897be 100644
    --- a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java
    +++ b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java
    @@ -64,16 +64,36 @@ public class TestHplsqlOffline {
        }

        @Test
    + public void testCreateTableTd() throws Exception {
    + run("create_table_td");
    + }
    +
    + @Test
    + public void testDeleteAll() throws Exception {
    + run("delete_all");
    + }
    +
    + @Test
        public void testInsertMysql() throws Exception {
          run("insert_mysql");
        }

        @Test
    + public void testSelect() throws Exception {
    + run("select");
    + }
    +
    + @Test
        public void testSelectDb2() throws Exception {
          run("select_db2");
        }

        @Test
    + public void testSelectTeradata() throws Exception {
    + run("select_teradata");
    + }
    +
    + @Test
        public void testUpdate() throws Exception {
          run("update");
        }

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/queries/local/if3_bteq.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/if3_bteq.sql b/hplsql/src/test/queries/local/if3_bteq.sql
    new file mode 100644
    index 0000000..12a39a9
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/if3_bteq.sql
    @@ -0,0 +1,3 @@
    +.if errorcode = 0 then .quit errorcode
    +
    +print 'Failed: must not be executed';

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/queries/offline/create_table_td.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/offline/create_table_td.sql b/hplsql/src/test/queries/offline/create_table_td.sql
    new file mode 100644
    index 0000000..2572bb6
    --- /dev/null
    +++ b/hplsql/src/test/queries/offline/create_table_td.sql
    @@ -0,0 +1,45 @@
    +CREATE TABLE tab, NO LOG, NO FALLBACK
    + (
    + SOURCE_ID INT,
    + RUN_ID INT,
    + STATUS CHAR,
    + LOAD_START timestamp(0),
    + LOAD_END timestamp(0)
    + );
    +
    +CREATE TABLE ctl, NO LOG, NO FALLBACK
    +AS
    +(
    + SELECT
    + EBC.SOURCE_ID,
    + MAX(EBC.RUN_ID) AS RUN_ID,
    + EBC.STATUS,
    + EBC.LOAD_START,
    + EBC.LOAD_END
    + FROM
    + EBC
    + WHERE
    + EBC.SOURCE_ID = 451 AND
    + EBC.STATUS = 'R'
    + GROUP BY
    + 1,3,4,5
    +);
    +
    +CREATE SET VOLATILE TABLE ctl2, NO LOG, NO FALLBACK
    +AS
    +(
    + SELECT
    + EBC.SOURCE_ID,
    + MAX(EBC.RUN_ID) AS RUN_ID,
    + EBC.STATUS,
    + EBC.LOAD_START,
    + EBC.LOAD_END
    + FROM
    + EBC
    + WHERE
    + EBC.SOURCE_ID = 451 AND
    + EBC.STATUS = 'R'
    + GROUP BY
    + 1,3,4,5
    +) WITH DATA PRIMARY INDEX (LOAD_START,LOAD_END)
    + ON COMMIT PRESERVE ROWS ;
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/queries/offline/delete_all.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/offline/delete_all.sql b/hplsql/src/test/queries/offline/delete_all.sql
    new file mode 100644
    index 0000000..e89fd48
    --- /dev/null
    +++ b/hplsql/src/test/queries/offline/delete_all.sql
    @@ -0,0 +1 @@
    +DELETE FROM TEST1_DB.WK_WRK ALL;
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/queries/offline/select.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/offline/select.sql b/hplsql/src/test/queries/offline/select.sql
    new file mode 100644
    index 0000000..0b6912e
    --- /dev/null
    +++ b/hplsql/src/test/queries/offline/select.sql
    @@ -0,0 +1,42 @@
    +SELECT * FROM a where 1=1 and not exists (select * from b)--abc;
    +
    +SELECT *
    + FROM a
    + where not exists
    + (
    + select * from b
    + );
    +
    +SELECT
    + *
    + FROM
    + tab
    + WHERE FILE_DATE > (
    + SELECT
    + MAX(FILE_DATE) AS MX_C_FILE_DT
    + FROM tab
    + WHERE FLAG = 'C'
    + AND IND = 'C'
    + AND FILE_DATE <
    + ( SELECT
    + CAST( LOAD_START AS DATE)
    + FROM
    + tab
    + WHERE
    + SOURCE_ID = 451 AND
    + BATCH = 'R'
    + )
    + );
    +
    +SELECT
    +*
    +FROM
    + DLTA_POC
    + LEFT OUTER JOIN TEST3_DB.TET ORG
    + ON DLTA_POC.YS_NO = ORG.EM_CODE_A
    + AND DLTA_POC.AREA_NO = ORG.AREA_CODE_2
    + AND DLTA_POC.GNT_POC = ORG.GEN_CD
    +
    + LEFT OUTER JOIN TEST.LOCATION LOC
    + ON DLTA_POC.SE_KEY_POC = LOC.LOC_ID
    + AND LOC.LOCATION_END_DT = DATE '9999-12-31' ;
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/queries/offline/select_teradata.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/offline/select_teradata.sql b/hplsql/src/test/queries/offline/select_teradata.sql
    new file mode 100644
    index 0000000..69522b8
    --- /dev/null
    +++ b/hplsql/src/test/queries/offline/select_teradata.sql
    @@ -0,0 +1,12 @@
    +SELECT branch_code,
    + branch_no,
    + c_no,
    + cd_type
    +FROM EMPLOYEE
    + WHERE S_CODE = 'C'
    + AND (branch_no) NOT IN (
    + SELECT branch_code
    + FROM DEPARTMENT
    + WHERE branch_code = 'ABC'
    + )
    +QUALIFY ROW_NUMBER() OVER (PARTITION BY c_no ORDER BY cd_type) = 1
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/db/select_into.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/db/select_into.out.txt b/hplsql/src/test/results/db/select_into.out.txt
    index 3f4ae31..6e4a69c 100644
    --- a/hplsql/src/test/results/db/select_into.out.txt
    +++ b/hplsql/src/test/results/db/select_into.out.txt
    @@ -6,7 +6,8 @@ Ln:5 DECLARE v_dec DECIMAL
      Ln:6 DECLARE v_dec0 DECIMAL
      Ln:7 DECLARE v_str STRING
      Ln:9 SELECT
    -Ln:9 SELECT CAST(1 AS BIGINT), CAST(1 AS INT), CAST(1 AS SMALLINT), CAST(1 AS TINYINT), CAST(1.1 AS DECIMAL(18,2)), CAST(1.1 AS DECIMAL(18,0)) FROM src LIMIT 1
    +Ln:9 SELECT CAST(1 AS BIGINT), CAST(1 AS INT), CAST(1 AS SMALLINT), CAST(1 AS TINYINT), CAST(1.1 AS DECIMAL(18,2)), CAST(1.1 AS DECIMAL(18,0))
    +FROM src LIMIT 1
      Ln:9 SELECT completed successfully
      Ln:9 SELECT INTO statement executed
      Ln:9 COLUMN: _c0, bigint

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/db/select_into2.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/db/select_into2.out.txt b/hplsql/src/test/results/db/select_into2.out.txt
    index 03e67ad..582fdfb 100644
    --- a/hplsql/src/test/results/db/select_into2.out.txt
    +++ b/hplsql/src/test/results/db/select_into2.out.txt
    @@ -2,7 +2,9 @@ Ln:1 DECLARE v_float float
      Ln:2 DECLARE v_double double
      Ln:3 DECLARE v_double2 double precision
      Ln:5 SELECT
    -Ln:5 select cast(1.1 as float), cast(1.1 as double), cast(1.1 as double) from src LIMIT 1
    +Ln:5 select
    + cast(1.1 as float), cast(1.1 as double), cast(1.1 as double)
    +from src LIMIT 1
      Ln:5 SELECT completed successfully
      Ln:5 SELECT INTO statement executed
      Ln:5 COLUMN: _c0, float

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/local/if3_bteq.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/if3_bteq.out.txt b/hplsql/src/test/results/local/if3_bteq.out.txt
    new file mode 100644
    index 0000000..47f3010
    --- /dev/null
    +++ b/hplsql/src/test/results/local/if3_bteq.out.txt
    @@ -0,0 +1,3 @@
    +Ln:1 IF
    +Ln:1 IF TRUE executed
    +Ln:1 QUIT
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/local/lang.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/lang.out.txt b/hplsql/src/test/results/local/lang.out.txt
    index 0047ec4..b3c460a 100644
    --- a/hplsql/src/test/results/local/lang.out.txt
    +++ b/hplsql/src/test/results/local/lang.out.txt
    @@ -7,19 +7,19 @@
      -1.0
      Ln:19 DECLARE abc int
      Ln:20 DECLARE abc.abc int
    -Ln:21 DECLARE abc . abc1 int
    +Ln:21 DECLARE abc.abc1 int
      Ln:22 DECLARE "abc" int
      Ln:23 DECLARE "abc".abc int
      Ln:24 DECLARE "abc"."abc" int
    -Ln:25 DECLARE "abc" . "abc1" int
    +Ln:25 DECLARE "abc"."abc1" int
      Ln:26 DECLARE [abc] int
      Ln:27 DECLARE [abc].abc int
      Ln:28 DECLARE [abc].[abc] int
    -Ln:29 DECLARE [abc] . [abc1] int
    +Ln:29 DECLARE [abc].[abc1] int
      Ln:30 DECLARE `abc` int
      Ln:31 DECLARE `abc`.abc int
      Ln:32 DECLARE `abc`.`abc` int
    -Ln:33 DECLARE `abc` . `abc1` int
    +Ln:33 DECLARE `abc`.`abc1` int
      Ln:34 DECLARE :new.abc int
      Ln:35 DECLARE @abc int
      Ln:36 DECLARE _abc int
    @@ -31,4 +31,4 @@ Ln:40 DECLARE abc_9 int
      2
      0
      -2
    -0
    +0
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_mssql.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/offline/create_table_mssql.out.txt b/hplsql/src/test/results/offline/create_table_mssql.out.txt
    index 43b0aa7..29d03d6 100644
    --- a/hplsql/src/test/results/offline/create_table_mssql.out.txt
    +++ b/hplsql/src/test/results/offline/create_table_mssql.out.txt
    @@ -1,24 +1,27 @@
      Ln:1 CREATE TABLE
    -Ln:1 CREATE TABLE mssql_t1 (d1 TIMESTAMP,
    -nc1 STRING,
    -n1 DECIMAL(3,0),
    -n2 DECIMAL(3),
    -n3 DECIMAL,
    -v1 STRING,
    -nv1 STRING,
    -nv2 STRING
    +Ln:1 CREATE TABLE mssql_t1 (
    + d1 TIMESTAMP,
    + nc1 STRING,
    + n1 DECIMAL(3,0),
    + n2 DECIMAL(3),
    + n3 DECIMAL,
    + v1 STRING,
    + nv1 STRING,
    + nv2 STRING
      )
      Ln:12 CREATE TABLE
    -Ln:12 CREATE TABLE `mssql_t2` (`i1` INT,
    -`v1` VARCHAR(350),
    -`v2` STRING,
    -`b1` TINYINT,
    -`d1` TIMESTAMP
    +Ln:12 CREATE TABLE `mssql_t2`(
    + `i1` INT,
    + `v1` VARCHAR(350),
    + `v2` STRING,
    + `b1` TINYINT,
    + `d1` TIMESTAMP
      )
      Ln:31 CREATE TABLE
    -Ln:31 CREATE TABLE `default`.`mssql_t3` (`v1` VARCHAR(50),
    -`s2` SMALLINT,
    -`sd1` TIMESTAMP,
    -`i1` INT,
    -`v2` VARCHAR(100)
    +Ln:31 CREATE TABLE `default`.`mssql_t3`(
    + `v1` VARCHAR(50),
    + `s2` SMALLINT,
    + `sd1` TIMESTAMP,
    + `i1` INT,
    + `v2` VARCHAR(100)
      )
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_mssql2.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/offline/create_table_mssql2.out.txt b/hplsql/src/test/results/offline/create_table_mssql2.out.txt
    index a765c4a..8341411 100644
    --- a/hplsql/src/test/results/offline/create_table_mssql2.out.txt
    +++ b/hplsql/src/test/results/offline/create_table_mssql2.out.txt
    @@ -1,10 +1,11 @@
      Ln:1 USE
      Ln:1 SQL statement: USE `mic.gr`
      Ln:14 CREATE TABLE
    -Ln:14 CREATE TABLE `downloads` (`id` int,
    -`fileName` char(255),
    -`fileType` char(10),
    -`downloads` int,
    -`fromDate` char(40),
    -`untilDate` char(40)
    +Ln:14 CREATE TABLE `downloads`(
    + `id` int,
    + `fileName` char(255),
    + `fileType` char(10),
    + `downloads` int,
    + `fromDate` char(40),
    + `untilDate` char(40)
      )
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_mysql.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/offline/create_table_mysql.out.txt b/hplsql/src/test/results/offline/create_table_mysql.out.txt
    index b835135..d07796f 100644
    --- a/hplsql/src/test/results/offline/create_table_mysql.out.txt
    +++ b/hplsql/src/test/results/offline/create_table_mysql.out.txt
    @@ -1,4 +1,5 @@
      Ln:1 CREATE TABLE
    -Ln:1 CREATE TABLE `users` (`id` int,
    -`name` STRING
    +Ln:1 CREATE TABLE IF NOT EXISTS `users` (
    + `id` int,
    + `name` STRING
      ) COMMENT 'users table'
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_ora.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/offline/create_table_ora.out.txt b/hplsql/src/test/results/offline/create_table_ora.out.txt
    index cf30c0f..972e00a 100644
    --- a/hplsql/src/test/results/offline/create_table_ora.out.txt
    +++ b/hplsql/src/test/results/offline/create_table_ora.out.txt
    @@ -1,42 +1,49 @@
      Ln:1 CREATE TABLE
    -Ln:1 CREATE TABLE ora_t1 (n1 DECIMAL(3,0),
    -v1 STRING
    +Ln:1 CREATE TABLE ora_t1 (
    + n1 DECIMAL(3,0),
    + v1 STRING
      )
      Ln:6 CREATE TABLE
    -Ln:6 CREATE TABLE `USER`.`EMP` (`EMPNO` DECIMAL(4,0),
    -`ENAME` STRING,
    -`JOB` STRING,
    -`MGR` DECIMAL(4,0),
    -`HIREDATE` DATE,
    -`SAL` DECIMAL(7,2),
    -`COMM` DECIMAL(7,2),
    -`DEPTNO` DECIMAL(2,0)
    -)
    +Ln:6 CREATE TABLE `USER`.`EMP`
    + ( `EMPNO` DECIMAL(4,0),
    + `ENAME` STRING,
    + `JOB` STRING,
    + `MGR` DECIMAL(4,0),
    + `HIREDATE` DATE,
    + `SAL` DECIMAL(7,2),
    + `COMM` DECIMAL(7,2),
    + `DEPTNO` DECIMAL(2,0)
    + )
      Ln:21 CREATE TABLE
    -Ln:21 CREATE TABLE language (id DECIMAL(7),
    -cd CHAR(2),
    -description STRING
    +Ln:21 CREATE TABLE language (
    + id DECIMAL(7),
    + cd CHAR(2),
    + description STRING
      )
      Ln:26 CREATE TABLE
    -Ln:26 CREATE TABLE author (id DECIMAL(7),
    -first_name STRING,
    -last_name STRING,
    -date_of_birth DATE,
    -year_of_birth DECIMAL(7),
    -distinguished DECIMAL(1)
    +Ln:26 CREATE TABLE author (
    + id DECIMAL(7),
    + first_name STRING,
    + last_name STRING,
    + date_of_birth DATE,
    + year_of_birth DECIMAL(7),
    + distinguished DECIMAL(1)
      )
      Ln:34 CREATE TABLE
    -Ln:34 CREATE TABLE book (id DECIMAL(7),
    -author_id DECIMAL(7),
    -title STRING,
    -published_in DECIMAL(7),
    -language_id DECIMAL(7)
    +Ln:34 CREATE TABLE book (
    + id DECIMAL(7),
    + author_id DECIMAL(7),
    + title STRING,
    + published_in DECIMAL(7),
    + language_id DECIMAL(7)
      )
      Ln:43 CREATE TABLE
    -Ln:43 CREATE TABLE book_store (name STRING
    +Ln:43 CREATE TABLE book_store (
    + name STRING
      )
      Ln:46 CREATE TABLE
    -Ln:46 CREATE TABLE book_to_book_store (name STRING,
    -book_id INTEGER,
    -stock INTEGER
    +Ln:46 CREATE TABLE book_to_book_store (
    + name STRING,
    + book_id INTEGER,
    + stock INTEGER
      )
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_ora2.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/offline/create_table_ora2.out.txt b/hplsql/src/test/results/offline/create_table_ora2.out.txt
    index 5d4e107..03f54e8 100644
    --- a/hplsql/src/test/results/offline/create_table_ora2.out.txt
    +++ b/hplsql/src/test/results/offline/create_table_ora2.out.txt
    @@ -1,5 +1,6 @@
      Ln:1 CREATE TABLE
    -Ln:1 CREATE TABLE `default`.`AUDIT_LOGS` (`RUN_ID` STRING,
    -`FILE_NAME` STRING,
    -`RUN_DATE` DATE
    -)
    \ No newline at end of file
    +Ln:1 CREATE TABLE `default`.`AUDIT_LOGS`
    + ( `RUN_ID` STRING,
    + `FILE_NAME` STRING,
    + `RUN_DATE` DATE
    + )
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_pg.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/offline/create_table_pg.out.txt b/hplsql/src/test/results/offline/create_table_pg.out.txt
    index cad5488..095eb12 100644
    --- a/hplsql/src/test/results/offline/create_table_pg.out.txt
    +++ b/hplsql/src/test/results/offline/create_table_pg.out.txt
    @@ -1,5 +1,6 @@
      Ln:1 CREATE TABLE
    -Ln:1 create table i1 (c1 SMALLINT,
    -c2 INT,
    -c3 BIGINT
    +Ln:1 create table i1 (
    + c1 SMALLINT,
    + c2 INT,
    + c3 BIGINT
      )
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_td.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/offline/create_table_td.out.txt b/hplsql/src/test/results/offline/create_table_td.out.txt
    new file mode 100644
    index 0000000..9b9d561
    --- /dev/null
    +++ b/hplsql/src/test/results/offline/create_table_td.out.txt
    @@ -0,0 +1,31 @@
    +Ln:1 CREATE TABLE
    +Ln:1 CREATE TABLE tab
    + (
    + SOURCE_ID INT,
    + RUN_ID INT,
    + STATUS CHAR,
    + LOAD_START timestamp(0),
    + LOAD_END timestamp(0)
    + )
    +Ln:10 CREATE TABLE
    +Ln:10 CREATE TABLE ctl
    +AS
    +(
    + SELECT
    + EBC.SOURCE_ID, MAX(EBC.RUN_ID) AS RUN_ID, EBC.STATUS, EBC.LOAD_START, EBC.LOAD_END
    + FROM EBC
    + WHERE EBC.SOURCE_ID = 451 AND EBC.STATUS = 'R'
    + GROUP BY
    + 1,3,4,5
    +)
    +Ln:28 CREATE LOCAL TEMPORARY TABLE
    +CREATE TEMPORARY TABLE ctl2
    +AS
    +(
    + SELECT
    + EBC.SOURCE_ID, MAX(EBC.RUN_ID) AS RUN_ID, EBC.STATUS, EBC.LOAD_START, EBC.LOAD_END
    + FROM EBC
    + WHERE EBC.SOURCE_ID = 451 AND EBC.STATUS = 'R'
    + GROUP BY
    + 1,3,4,5
    +)
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/delete_all.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/offline/delete_all.out.txt b/hplsql/src/test/results/offline/delete_all.out.txt
    new file mode 100644
    index 0000000..0cecc95
    --- /dev/null
    +++ b/hplsql/src/test/results/offline/delete_all.out.txt
    @@ -0,0 +1,2 @@
    +Ln:1 DELETE
    +Ln:1 TRUNCATE TABLE TEST1_DB.WK_WRK
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/select.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/offline/select.out.txt b/hplsql/src/test/results/offline/select.out.txt
    new file mode 100644
    index 0000000..529f0b5
    --- /dev/null
    +++ b/hplsql/src/test/results/offline/select.out.txt
    @@ -0,0 +1,34 @@
    +Ln:1 SELECT
    +Ln:1 SELECT * FROM a where 1 = 1 and not exists (select * from b)
    +Ln:1 Not executed - offline mode set
    +Ln:3 SELECT
    +Ln:3 SELECT *
    + FROM a
    + where not exists
    + (
    + select * from b
    + )
    +Ln:3 Not executed - offline mode set
    +Ln:10 SELECT
    +Ln:10 SELECT
    + *
    + FROM tab
    + WHERE FILE_DATE > (
    + SELECT
    + MAX(FILE_DATE) AS MX_C_FILE_DT
    + FROM tab
    + WHERE FLAG = 'C' AND IND = 'C' AND FILE_DATE < ( SELECT
    + CAST( LOAD_START AS DATE)
    + FROM tab
    + WHERE SOURCE_ID = 451 AND BATCH = 'R'
    + )
    + )
    +Ln:10 Not executed - offline mode set
    +Ln:31 SELECT
    +Ln:31 SELECT
    +*
    +FROM DLTA_POC LEFT OUTER JOIN TEST3_DB.TET ORG ON DLTA_POC.YS_NO = ORG.EM_CODE_A
    + AND DLTA_POC.AREA_NO = ORG.AREA_CODE_2
    + AND DLTA_POC.GNT_POC = ORG.GEN_CD LEFT OUTER JOIN TEST.LOCATION LOC ON DLTA_POC.SE_KEY_POC = LOC.LOC_ID
    + AND LOC.LOCATION_END_DT = DATE '9999-12-31'
    +Ln:31 Not executed - offline mode set
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/select_db2.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/offline/select_db2.out.txt b/hplsql/src/test/results/offline/select_db2.out.txt
    index 1d64e8a..bb5b455 100644
    --- a/hplsql/src/test/results/offline/select_db2.out.txt
    +++ b/hplsql/src/test/results/offline/select_db2.out.txt
    @@ -2,5 +2,6 @@ Ln:1 SELECT
      Ln:1 select coalesce(max(info_id) + 1, 0) from sproc_info
      Ln:1 Not executed - offline mode set
      Ln:3 SELECT
    -Ln:3 select cd, cd + inc days, cd - inc days + coalesce(inc, 0) days from (select date '2015-09-02' as cd, 3 as inc from sysibm.sysdummy1)
    +Ln:3 select cd, cd + inc days, cd - inc days + coalesce(inc, 0) days
    +from (select date '2015-09-02' as cd, 3 as inc from sysibm.sysdummy1)
      Ln:3 Not executed - offline mode set
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/select_teradata.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/offline/select_teradata.out.txt b/hplsql/src/test/results/offline/select_teradata.out.txt
    new file mode 100644
    index 0000000..34ab433
    --- /dev/null
    +++ b/hplsql/src/test/results/offline/select_teradata.out.txt
    @@ -0,0 +1,10 @@
    +Ln:1 SELECT
    +Ln:1 SELECT branch_code, branch_no, c_no, cd_type
    +FROM EMPLOYEE
    + WHERE S_CODE = 'C' AND (branch_no) NOT IN (
    + SELECT branch_code
    + FROM DEPARTMENT
    + WHERE branch_code = 'ABC'
    + )
    +QUALIFY ROW_NUMBER() OVER (PARTITION BY c_no ORDER BY cd_type) = 1
    +Ln:1 Not executed - offline mode set
    \ No newline at end of file
  • Spena at May 6, 2016 at 8:42 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/0b5c27fd/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
    ----------------------------------------------------------------------
    diff --git a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
    index 820f6be..16531e3 100644
    --- a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
    +++ b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
    @@ -3212,202 +3212,73 @@ public final class LlapDaemonProtocolProtos {
          // @@protoc_insertion_point(class_scope:GroupInputSpecProto)
        }

    - public interface FragmentSpecProtoOrBuilder
    + public interface VertexIdentifierOrBuilder
            extends com.google.protobuf.MessageOrBuilder {

    - // optional string fragment_identifier_string = 1;
    + // optional string application_id_string = 1;
          /**
    - * <code>optional string fragment_identifier_string = 1;</code>
    + * <code>optional string application_id_string = 1;</code>
           */
    - boolean hasFragmentIdentifierString();
    + boolean hasApplicationIdString();
          /**
    - * <code>optional string fragment_identifier_string = 1;</code>
    + * <code>optional string application_id_string = 1;</code>
           */
    - java.lang.String getFragmentIdentifierString();
    + java.lang.String getApplicationIdString();
          /**
    - * <code>optional string fragment_identifier_string = 1;</code>
    + * <code>optional string application_id_string = 1;</code>
           */
          com.google.protobuf.ByteString
    - getFragmentIdentifierStringBytes();
    + getApplicationIdStringBytes();

    - // optional string dag_name = 2;
    - /**
    - * <code>optional string dag_name = 2;</code>
    - */
    - boolean hasDagName();
    + // optional int32 app_attempt_number = 2;
          /**
    - * <code>optional string dag_name = 2;</code>
    + * <code>optional int32 app_attempt_number = 2;</code>
           */
    - java.lang.String getDagName();
    + boolean hasAppAttemptNumber();
          /**
    - * <code>optional string dag_name = 2;</code>
    + * <code>optional int32 app_attempt_number = 2;</code>
           */
    - com.google.protobuf.ByteString
    - getDagNameBytes();
    + int getAppAttemptNumber();

    - // optional int32 dag_id = 11;
    + // optional int32 dag_id = 3;
          /**
    - * <code>optional int32 dag_id = 11;</code>
    + * <code>optional int32 dag_id = 3;</code>
           */
          boolean hasDagId();
          /**
    - * <code>optional int32 dag_id = 11;</code>
    + * <code>optional int32 dag_id = 3;</code>
           */
          int getDagId();

    - // optional string vertex_name = 3;
    - /**
    - * <code>optional string vertex_name = 3;</code>
    - */
    - boolean hasVertexName();
    - /**
    - * <code>optional string vertex_name = 3;</code>
    - */
    - java.lang.String getVertexName();
    - /**
    - * <code>optional string vertex_name = 3;</code>
    - */
    - com.google.protobuf.ByteString
    - getVertexNameBytes();
    -
    - // optional .EntityDescriptorProto processor_descriptor = 4;
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - boolean hasProcessorDescriptor();
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getProcessorDescriptor();
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getProcessorDescriptorOrBuilder();
    -
    - // repeated .IOSpecProto input_specs = 5;
    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>
    - getInputSpecsList();
    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getInputSpecs(int index);
    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - int getInputSpecsCount();
    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
    - getInputSpecsOrBuilderList();
    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getInputSpecsOrBuilder(
    - int index);
    -
    - // repeated .IOSpecProto output_specs = 6;
    - /**
    - * <code>repeated .IOSpecProto output_specs = 6;</code>
    - */
    - java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>
    - getOutputSpecsList();
    - /**
    - * <code>repeated .IOSpecProto output_specs = 6;</code>
    - */
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getOutputSpecs(int index);
    - /**
    - * <code>repeated .IOSpecProto output_specs = 6;</code>
    - */
    - int getOutputSpecsCount();
    - /**
    - * <code>repeated .IOSpecProto output_specs = 6;</code>
    - */
    - java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
    - getOutputSpecsOrBuilderList();
    - /**
    - * <code>repeated .IOSpecProto output_specs = 6;</code>
    - */
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getOutputSpecsOrBuilder(
    - int index);
    -
    - // repeated .GroupInputSpecProto grouped_input_specs = 7;
    - /**
    - * <code>repeated .GroupInputSpecProto grouped_input_specs = 7;</code>
    - */
    - java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto>
    - getGroupedInputSpecsList();
    - /**
    - * <code>repeated .GroupInputSpecProto grouped_input_specs = 7;</code>
    - */
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto getGroupedInputSpecs(int index);
    - /**
    - * <code>repeated .GroupInputSpecProto grouped_input_specs = 7;</code>
    - */
    - int getGroupedInputSpecsCount();
    - /**
    - * <code>repeated .GroupInputSpecProto grouped_input_specs = 7;</code>
    - */
    - java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder>
    - getGroupedInputSpecsOrBuilderList();
    - /**
    - * <code>repeated .GroupInputSpecProto grouped_input_specs = 7;</code>
    - */
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder getGroupedInputSpecsOrBuilder(
    - int index);
    -
    - // optional int32 vertex_parallelism = 8;
    - /**
    - * <code>optional int32 vertex_parallelism = 8;</code>
    - */
    - boolean hasVertexParallelism();
    - /**
    - * <code>optional int32 vertex_parallelism = 8;</code>
    - */
    - int getVertexParallelism();
    -
    - // optional int32 fragment_number = 9;
    - /**
    - * <code>optional int32 fragment_number = 9;</code>
    - */
    - boolean hasFragmentNumber();
    - /**
    - * <code>optional int32 fragment_number = 9;</code>
    - */
    - int getFragmentNumber();
    -
    - // optional int32 attempt_number = 10;
    + // optional int32 vertex_id = 4;
          /**
    - * <code>optional int32 attempt_number = 10;</code>
    + * <code>optional int32 vertex_id = 4;</code>
           */
    - boolean hasAttemptNumber();
    + boolean hasVertexId();
          /**
    - * <code>optional int32 attempt_number = 10;</code>
    + * <code>optional int32 vertex_id = 4;</code>
           */
    - int getAttemptNumber();
    + int getVertexId();
        }
        /**
    - * Protobuf type {@code FragmentSpecProto}
    + * Protobuf type {@code VertexIdentifier}
         */
    - public static final class FragmentSpecProto extends
    + public static final class VertexIdentifier extends
            com.google.protobuf.GeneratedMessage
    - implements FragmentSpecProtoOrBuilder {
    - // Use FragmentSpecProto.newBuilder() to construct.
    - private FragmentSpecProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
    + implements VertexIdentifierOrBuilder {
    + // Use VertexIdentifier.newBuilder() to construct.
    + private VertexIdentifier(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
            super(builder);
            this.unknownFields = builder.getUnknownFields();
          }
    - private FragmentSpecProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
    + private VertexIdentifier(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    - private static final FragmentSpecProto defaultInstance;
    - public static FragmentSpecProto getDefaultInstance() {
    + private static final VertexIdentifier defaultInstance;
    + public static VertexIdentifier getDefaultInstance() {
            return defaultInstance;
          }

    - public FragmentSpecProto getDefaultInstanceForType() {
    + public VertexIdentifier getDefaultInstanceForType() {
            return defaultInstance;
          }

    @@ -3417,7 +3288,7 @@ public final class LlapDaemonProtocolProtos {
              getUnknownFields() {
            return this.unknownFields;
          }
    - private FragmentSpecProto(
    + private VertexIdentifier(
              com.google.protobuf.CodedInputStream input,
              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
              throws com.google.protobuf.InvalidProtocolBufferException {
    @@ -3442,76 +3313,24 @@ public final class LlapDaemonProtocolProtos {
                  }
                  case 10: {
                    bitField0_ |= 0x00000001;
    - fragmentIdentifierString_ = input.readBytes();
    + applicationIdString_ = input.readBytes();
                    break;
                  }
    - case 18: {
    + case 16: {
                    bitField0_ |= 0x00000002;
    - dagName_ = input.readBytes();
    - break;
    - }
    - case 26: {
    - bitField0_ |= 0x00000008;
    - vertexName_ = input.readBytes();
    - break;
    - }
    - case 34: {
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder subBuilder = null;
    - if (((bitField0_ & 0x00000010) == 0x00000010)) {
    - subBuilder = processorDescriptor_.toBuilder();
    - }
    - processorDescriptor_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.PARSER, extensionRegistry);
    - if (subBuilder != null) {
    - subBuilder.mergeFrom(processorDescriptor_);
    - processorDescriptor_ = subBuilder.buildPartial();
    - }
    - bitField0_ |= 0x00000010;
    - break;
    - }
    - case 42: {
    - if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
    - inputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>();
    - mutable_bitField0_ |= 0x00000020;
    - }
    - inputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.PARSER, extensionRegistry));
    - break;
    - }
    - case 50: {
    - if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
    - outputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>();
    - mutable_bitField0_ |= 0x00000040;
    - }
    - outputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.PARSER, extensionRegistry));
    - break;
    - }
    - case 58: {
    - if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
    - groupedInputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto>();
    - mutable_bitField0_ |= 0x00000080;
    - }
    - groupedInputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.PARSER, extensionRegistry));
    - break;
    - }
    - case 64: {
    - bitField0_ |= 0x00000020;
    - vertexParallelism_ = input.readInt32();
    - break;
    - }
    - case 72: {
    - bitField0_ |= 0x00000040;
    - fragmentNumber_ = input.readInt32();
    - break;
    - }
    - case 80: {
    - bitField0_ |= 0x00000080;
    - attemptNumber_ = input.readInt32();
    + appAttemptNumber_ = input.readInt32();
                    break;
                  }
    - case 88: {
    + case 24: {
                    bitField0_ |= 0x00000004;
                    dagId_ = input.readInt32();
                    break;
                  }
    + case 32: {
    + bitField0_ |= 0x00000008;
    + vertexId_ = input.readInt32();
    + break;
    + }
                }
              }
            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    @@ -3520,61 +3339,52 @@ public final class LlapDaemonProtocolProtos {
              throw new com.google.protobuf.InvalidProtocolBufferException(
                  e.getMessage()).setUnfinishedMessage(this);
            } finally {
    - if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
    - inputSpecs_ = java.util.Collections.unmodifiableList(inputSpecs_);
    - }
    - if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
    - outputSpecs_ = java.util.Collections.unmodifiableList(outputSpecs_);
    - }
    - if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
    - groupedInputSpecs_ = java.util.Collections.unmodifiableList(groupedInputSpecs_);
    - }
              this.unknownFields = unknownFields.build();
              makeExtensionsImmutable();
            }
          }
          public static final com.google.protobuf.Descriptors.Descriptor
              getDescriptor() {
    - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentSpecProto_descriptor;
    + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_VertexIdentifier_descriptor;
          }

          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
              internalGetFieldAccessorTable() {
    - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentSpecProto_fieldAccessorTable
    + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_VertexIdentifier_fieldAccessorTable
                .ensureFieldAccessorsInitialized(
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.Builder.class);
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier.Builder.class);
          }

    - public static com.google.protobuf.Parser<FragmentSpecProto> PARSER =
    - new com.google.protobuf.AbstractParser<FragmentSpecProto>() {
    - public FragmentSpecProto parsePartialFrom(
    + public static com.google.protobuf.Parser<VertexIdentifier> PARSER =
    + new com.google.protobuf.AbstractParser<VertexIdentifier>() {
    + public VertexIdentifier parsePartialFrom(
                com.google.protobuf.CodedInputStream input,
                com.google.protobuf.ExtensionRegistryLite extensionRegistry)
                throws com.google.protobuf.InvalidProtocolBufferException {
    - return new FragmentSpecProto(input, extensionRegistry);
    + return new VertexIdentifier(input, extensionRegistry);
            }
          };

          @java.lang.Override
    - public com.google.protobuf.Parser<FragmentSpecProto> getParserForType() {
    + public com.google.protobuf.Parser<VertexIdentifier> getParserForType() {
            return PARSER;
          }

          private int bitField0_;
    - // optional string fragment_identifier_string = 1;
    - public static final int FRAGMENT_IDENTIFIER_STRING_FIELD_NUMBER = 1;
    - private java.lang.Object fragmentIdentifierString_;
    + // optional string application_id_string = 1;
    + public static final int APPLICATION_ID_STRING_FIELD_NUMBER = 1;
    + private java.lang.Object applicationIdString_;
          /**
    - * <code>optional string fragment_identifier_string = 1;</code>
    + * <code>optional string application_id_string = 1;</code>
           */
    - public boolean hasFragmentIdentifierString() {
    + public boolean hasApplicationIdString() {
            return ((bitField0_ & 0x00000001) == 0x00000001);
          }
          /**
    - * <code>optional string fragment_identifier_string = 1;</code>
    + * <code>optional string application_id_string = 1;</code>
           */
    - public java.lang.String getFragmentIdentifierString() {
    - java.lang.Object ref = fragmentIdentifierString_;
    + public java.lang.String getApplicationIdString() {
    + java.lang.Object ref = applicationIdString_;
            if (ref instanceof java.lang.String) {
              return (java.lang.String) ref;
            } else {
    @@ -3582,320 +3392,81 @@ public final class LlapDaemonProtocolProtos {
                  (com.google.protobuf.ByteString) ref;
              java.lang.String s = bs.toStringUtf8();
              if (bs.isValidUtf8()) {
    - fragmentIdentifierString_ = s;
    + applicationIdString_ = s;
              }
              return s;
            }
          }
          /**
    - * <code>optional string fragment_identifier_string = 1;</code>
    + * <code>optional string application_id_string = 1;</code>
           */
          public com.google.protobuf.ByteString
    - getFragmentIdentifierStringBytes() {
    - java.lang.Object ref = fragmentIdentifierString_;
    + getApplicationIdStringBytes() {
    + java.lang.Object ref = applicationIdString_;
            if (ref instanceof java.lang.String) {
              com.google.protobuf.ByteString b =
                  com.google.protobuf.ByteString.copyFromUtf8(
                      (java.lang.String) ref);
    - fragmentIdentifierString_ = b;
    + applicationIdString_ = b;
              return b;
            } else {
              return (com.google.protobuf.ByteString) ref;
            }
          }

    - // optional string dag_name = 2;
    - public static final int DAG_NAME_FIELD_NUMBER = 2;
    - private java.lang.Object dagName_;
    + // optional int32 app_attempt_number = 2;
    + public static final int APP_ATTEMPT_NUMBER_FIELD_NUMBER = 2;
    + private int appAttemptNumber_;
          /**
    - * <code>optional string dag_name = 2;</code>
    + * <code>optional int32 app_attempt_number = 2;</code>
           */
    - public boolean hasDagName() {
    + public boolean hasAppAttemptNumber() {
            return ((bitField0_ & 0x00000002) == 0x00000002);
          }
          /**
    - * <code>optional string dag_name = 2;</code>
    - */
    - public java.lang.String getDagName() {
    - java.lang.Object ref = dagName_;
    - if (ref instanceof java.lang.String) {
    - return (java.lang.String) ref;
    - } else {
    - com.google.protobuf.ByteString bs =
    - (com.google.protobuf.ByteString) ref;
    - java.lang.String s = bs.toStringUtf8();
    - if (bs.isValidUtf8()) {
    - dagName_ = s;
    - }
    - return s;
    - }
    - }
    - /**
    - * <code>optional string dag_name = 2;</code>
    + * <code>optional int32 app_attempt_number = 2;</code>
           */
    - public com.google.protobuf.ByteString
    - getDagNameBytes() {
    - java.lang.Object ref = dagName_;
    - if (ref instanceof java.lang.String) {
    - com.google.protobuf.ByteString b =
    - com.google.protobuf.ByteString.copyFromUtf8(
    - (java.lang.String) ref);
    - dagName_ = b;
    - return b;
    - } else {
    - return (com.google.protobuf.ByteString) ref;
    - }
    + public int getAppAttemptNumber() {
    + return appAttemptNumber_;
          }

    - // optional int32 dag_id = 11;
    - public static final int DAG_ID_FIELD_NUMBER = 11;
    + // optional int32 dag_id = 3;
    + public static final int DAG_ID_FIELD_NUMBER = 3;
          private int dagId_;
          /**
    - * <code>optional int32 dag_id = 11;</code>
    + * <code>optional int32 dag_id = 3;</code>
           */
          public boolean hasDagId() {
            return ((bitField0_ & 0x00000004) == 0x00000004);
          }
          /**
    - * <code>optional int32 dag_id = 11;</code>
    + * <code>optional int32 dag_id = 3;</code>
           */
          public int getDagId() {
            return dagId_;
          }

    - // optional string vertex_name = 3;
    - public static final int VERTEX_NAME_FIELD_NUMBER = 3;
    - private java.lang.Object vertexName_;
    + // optional int32 vertex_id = 4;
    + public static final int VERTEX_ID_FIELD_NUMBER = 4;
    + private int vertexId_;
          /**
    - * <code>optional string vertex_name = 3;</code>
    + * <code>optional int32 vertex_id = 4;</code>
           */
    - public boolean hasVertexName() {
    + public boolean hasVertexId() {
            return ((bitField0_ & 0x00000008) == 0x00000008);
          }
          /**
    - * <code>optional string vertex_name = 3;</code>
    - */
    - public java.lang.String getVertexName() {
    - java.lang.Object ref = vertexName_;
    - if (ref instanceof java.lang.String) {
    - return (java.lang.String) ref;
    - } else {
    - com.google.protobuf.ByteString bs =
    - (com.google.protobuf.ByteString) ref;
    - java.lang.String s = bs.toStringUtf8();
    - if (bs.isValidUtf8()) {
    - vertexName_ = s;
    - }
    - return s;
    - }
    - }
    - /**
    - * <code>optional string vertex_name = 3;</code>
    + * <code>optional int32 vertex_id = 4;</code>
           */
    - public com.google.protobuf.ByteString
    - getVertexNameBytes() {
    - java.lang.Object ref = vertexName_;
    - if (ref instanceof java.lang.String) {
    - com.google.protobuf.ByteString b =
    - com.google.protobuf.ByteString.copyFromUtf8(
    - (java.lang.String) ref);
    - vertexName_ = b;
    - return b;
    - } else {
    - return (com.google.protobuf.ByteString) ref;
    - }
    - }
    -
    - // optional .EntityDescriptorProto processor_descriptor = 4;
    - public static final int PROCESSOR_DESCRIPTOR_FIELD_NUMBER = 4;
    - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto processorDescriptor_;
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - public boolean hasProcessorDescriptor() {
    - return ((bitField0_ & 0x00000010) == 0x00000010);
    - }
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getProcessorDescriptor() {
    - return processorDescriptor_;
    - }
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getProcessorDescriptorOrBuilder() {
    - return processorDescriptor_;
    - }
    -
    - // repeated .IOSpecProto input_specs = 5;
    - public static final int INPUT_SPECS_FIELD_NUMBER = 5;
    - private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> inputSpecs_;
    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> getInputSpecsList() {
    - return inputSpecs_;
    - }
    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
    - getInputSpecsOrBuilderList() {
    - return inputSpecs_;
    - }
    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - public int getInputSpecsCount() {
    - return inputSpecs_.size();
    - }
    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getInputSpecs(int index) {
    - return inputSpecs_.get(index);
    - }
    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getInputSpecsOrBuilder(
    - int index) {
    - return inputSpecs_.get(index);
    - }
    -
    - // repeated .IOSpecProto output_specs = 6;
    - public static final int OUTPUT_SPECS_FIELD_NUMBER = 6;
    - private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> outputSpecs_;
    - /**
    - * <code>repeated .IOSpecProto output_specs = 6;</code>
    - */
    - public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> getOutputSpecsList() {
    - return outputSpecs_;
    - }
    - /**
    - * <code>repeated .IOSpecProto output_specs = 6;</code>
    - */
    - public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
    - getOutputSpecsOrBuilderList() {
    - return outputSpecs_;
    - }
    - /**
    - * <code>repeated .IOSpecProto output_specs = 6;</code>
    - */
    - public int getOutputSpecsCount() {
    - return outputSpecs_.size();
    - }
    - /**
    - * <code>repeated .IOSpecProto output_specs = 6;</code>
    - */
    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getOutputSpecs(int index) {
    - return outputSpecs_.get(index);
    - }
    - /**
    - * <code>repeated .IOSpecProto output_specs = 6;</code>
    - */
    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getOutputSpecsOrBuilder(
    - int index) {
    - return outputSpecs_.get(index);
    - }
    -
    - // repeated .GroupInputSpecProto grouped_input_specs = 7;
    - public static final int GROUPED_INPUT_SPECS_FIELD_NUMBER = 7;
    - private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto> groupedInputSpecs_;
    - /**
    - * <code>repeated .GroupInputSpecProto grouped_input_specs = 7;</code>
    - */
    - public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto> getGroupedInputSpecsList() {
    - return groupedInputSpecs_;
    - }
    - /**
    - * <code>repeated .GroupInputSpecProto grouped_input_specs = 7;</code>
    - */
    - public java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder>
    - getGroupedInputSpecsOrBuilderList() {
    - return groupedInputSpecs_;
    - }
    - /**
    - * <code>repeated .GroupInputSpecProto grouped_input_specs = 7;</code>
    - */
    - public int getGroupedInputSpecsCount() {
    - return groupedInputSpecs_.size();
    - }
    - /**
    - * <code>repeated .GroupInputSpecProto grouped_input_specs = 7;</code>
    - */
    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto getGroupedInputSpecs(int index) {
    - return groupedInputSpecs_.get(index);
    - }
    - /**
    - * <code>repeated .GroupInputSpecProto grouped_input_specs = 7;</code>
    - */
    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder getGroupedInputSpecsOrBuilder(
    - int index) {
    - return groupedInputSpecs_.get(index);
    - }
    -
    - // optional int32 vertex_parallelism = 8;
    - public static final int VERTEX_PARALLELISM_FIELD_NUMBER = 8;
    - private int vertexParallelism_;
    - /**
    - * <code>optional int32 vertex_parallelism = 8;</code>
    - */
    - public boolean hasVertexParallelism() {
    - return ((bitField0_ & 0x00000020) == 0x00000020);
    - }
    - /**
    - * <code>optional int32 vertex_parallelism = 8;</code>
    - */
    - public int getVertexParallelism() {
    - return vertexParallelism_;
    - }
    -
    - // optional int32 fragment_number = 9;
    - public static final int FRAGMENT_NUMBER_FIELD_NUMBER = 9;
    - private int fragmentNumber_;
    - /**
    - * <code>optional int32 fragment_number = 9;</code>
    - */
    - public boolean hasFragmentNumber() {
    - return ((bitField0_ & 0x00000040) == 0x00000040);
    - }
    - /**
    - * <code>optional int32 fragment_number = 9;</code>
    - */
    - public int getFragmentNumber() {
    - return fragmentNumber_;
    - }
    -
    - // optional int32 attempt_number = 10;
    - public static final int ATTEMPT_NUMBER_FIELD_NUMBER = 10;
    - private int attemptNumber_;
    - /**
    - * <code>optional int32 attempt_number = 10;</code>
    - */
    - public boolean hasAttemptNumber() {
    - return ((bitField0_ & 0x00000080) == 0x00000080);
    - }
    - /**
    - * <code>optional int32 attempt_number = 10;</code>
    - */
    - public int getAttemptNumber() {
    - return attemptNumber_;
    + public int getVertexId() {
    + return vertexId_;
          }

          private void initFields() {
    - fragmentIdentifierString_ = "";
    - dagName_ = "";
    + applicationIdString_ = "";
    + appAttemptNumber_ = 0;
            dagId_ = 0;
    - vertexName_ = "";
    - processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
    - inputSpecs_ = java.util.Collections.emptyList();
    - outputSpecs_ = java.util.Collections.emptyList();
    - groupedInputSpecs_ = java.util.Collections.emptyList();
    - vertexParallelism_ = 0;
    - fragmentNumber_ = 0;
    - attemptNumber_ = 0;
    + vertexId_ = 0;
          }
          private byte memoizedIsInitialized = -1;
          public final boolean isInitialized() {
    @@ -3910,37 +3481,16 @@ public final class LlapDaemonProtocolProtos {
                              throws java.io.IOException {
            getSerializedSize();
            if (((bitField0_ & 0x00000001) == 0x00000001)) {
    - output.writeBytes(1, getFragmentIdentifierStringBytes());
    + output.writeBytes(1, getApplicationIdStringBytes());
            }
            if (((bitField0_ & 0x00000002) == 0x00000002)) {
    - output.writeBytes(2, getDagNameBytes());
    - }
    - if (((bitField0_ & 0x00000008) == 0x00000008)) {
    - output.writeBytes(3, getVertexNameBytes());
    - }
    - if (((bitField0_ & 0x00000010) == 0x00000010)) {
    - output.writeMessage(4, processorDescriptor_);
    - }
    - for (int i = 0; i < inputSpecs_.size(); i++) {
    - output.writeMessage(5, inputSpecs_.get(i));
    - }
    - for (int i = 0; i < outputSpecs_.size(); i++) {
    - output.writeMessage(6, outputSpecs_.get(i));
    - }
    - for (int i = 0; i < groupedInputSpecs_.size(); i++) {
    - output.writeMessage(7, groupedInputSpecs_.get(i));
    - }
    - if (((bitField0_ & 0x00000020) == 0x00000020)) {
    - output.writeInt32(8, vertexParallelism_);
    - }
    - if (((bitField0_ & 0x00000040) == 0x00000040)) {
    - output.writeInt32(9, fragmentNumber_);
    - }
    - if (((bitField0_ & 0x00000080) == 0x00000080)) {
    - output.writeInt32(10, attemptNumber_);
    + output.writeInt32(2, appAttemptNumber_);
            }
            if (((bitField0_ & 0x00000004) == 0x00000004)) {
    - output.writeInt32(11, dagId_);
    + output.writeInt32(3, dagId_);
    + }
    + if (((bitField0_ & 0x00000008) == 0x00000008)) {
    + output.writeInt32(4, vertexId_);
            }
            getUnknownFields().writeTo(output);
          }
    @@ -3953,47 +3503,19 @@ public final class LlapDaemonProtocolProtos {
            size = 0;
            if (((bitField0_ & 0x00000001) == 0x00000001)) {
              size += com.google.protobuf.CodedOutputStream
    - .computeBytesSize(1, getFragmentIdentifierStringBytes());
    + .computeBytesSize(1, getApplicationIdStringBytes());
            }
            if (((bitField0_ & 0x00000002) == 0x00000002)) {
              size += com.google.protobuf.CodedOutputStream
    - .computeBytesSize(2, getDagNameBytes());
    - }
    - if (((bitField0_ & 0x00000008) == 0x00000008)) {
    - size += com.google.protobuf.CodedOutputStream
    - .computeBytesSize(3, getVertexNameBytes());
    - }
    - if (((bitField0_ & 0x00000010) == 0x00000010)) {
    - size += com.google.protobuf.CodedOutputStream
    - .computeMessageSize(4, processorDescriptor_);
    + .computeInt32Size(2, appAttemptNumber_);
            }
    - for (int i = 0; i < inputSpecs_.size(); i++) {
    - size += com.google.protobuf.CodedOutputStream
    - .computeMessageSize(5, inputSpecs_.get(i));
    - }
    - for (int i = 0; i < outputSpecs_.size(); i++) {
    - size += com.google.protobuf.CodedOutputStream
    - .computeMessageSize(6, outputSpecs_.get(i));
    - }
    - for (int i = 0; i < groupedInputSpecs_.size(); i++) {
    - size += com.google.protobuf.CodedOutputStream
    - .computeMessageSize(7, groupedInputSpecs_.get(i));
    - }
    - if (((bitField0_ & 0x00000020) == 0x00000020)) {
    - size += com.google.protobuf.CodedOutputStream
    - .computeInt32Size(8, vertexParallelism_);
    - }
    - if (((bitField0_ & 0x00000040) == 0x00000040)) {
    - size += com.google.protobuf.CodedOutputStream
    - .computeInt32Size(9, fragmentNumber_);
    - }
    - if (((bitField0_ & 0x00000080) == 0x00000080)) {
    + if (((bitField0_ & 0x00000004) == 0x00000004)) {
              size += com.google.protobuf.CodedOutputStream
    - .computeInt32Size(10, attemptNumber_);
    + .computeInt32Size(3, dagId_);
            }
    - if (((bitField0_ & 0x00000004) == 0x00000004)) {
    + if (((bitField0_ & 0x00000008) == 0x00000008)) {
              size += com.google.protobuf.CodedOutputStream
    - .computeInt32Size(11, dagId_);
    + .computeInt32Size(4, vertexId_);
            }
            size += getUnknownFields().getSerializedSize();
            memoizedSerializedSize = size;
    @@ -4012,57 +3534,31 @@ public final class LlapDaemonProtocolProtos {
            if (obj == this) {
             return true;
            }
    - if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto)) {
    + if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier)) {
              return super.equals(obj);
            }
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto) obj;
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier) obj;

            boolean result = true;
    - result = result && (hasFragmentIdentifierString() == other.hasFragmentIdentifierString());
    - if (hasFragmentIdentifierString()) {
    - result = result && getFragmentIdentifierString()
    - .equals(other.getFragmentIdentifierString());
    + result = result && (hasApplicationIdString() == other.hasApplicationIdString());
    + if (hasApplicationIdString()) {
    + result = result && getApplicationIdString()
    + .equals(other.getApplicationIdString());
            }
    - result = result && (hasDagName() == other.hasDagName());
    - if (hasDagName()) {
    - result = result && getDagName()
    - .equals(other.getDagName());
    + result = result && (hasAppAttemptNumber() == other.hasAppAttemptNumber());
    + if (hasAppAttemptNumber()) {
    + result = result && (getAppAttemptNumber()
    + == other.getAppAttemptNumber());
            }
            result = result && (hasDagId() == other.hasDagId());
            if (hasDagId()) {
              result = result && (getDagId()
                  == other.getDagId());
            }
    - result = result && (hasVertexName() == other.hasVertexName());
    - if (hasVertexName()) {
    - result = result && getVertexName()
    - .equals(other.getVertexName());
    - }
    - result = result && (hasProcessorDescriptor() == other.hasProcessorDescriptor());
    - if (hasProcessorDescriptor()) {
    - result = result && getProcessorDescriptor()
    - .equals(other.getProcessorDescriptor());
    - }
    - result = result && getInputSpecsList()
    - .equals(other.getInputSpecsList());
    - result = result && getOutputSpecsList()
    - .equals(other.getOutputSpecsList());
    - result = result && getGroupedInputSpecsList()
    - .equals(other.getGroupedInputSpecsList());
    - result = result && (hasVertexParallelism() == other.hasVertexParallelism());
    - if (hasVertexParallelism()) {
    - result = result && (getVertexParallelism()
    - == other.getVertexParallelism());
    - }
    - result = result && (hasFragmentNumber() == other.hasFragmentNumber());
    - if (hasFragmentNumber()) {
    - result = result && (getFragmentNumber()
    - == other.getFragmentNumber());
    - }
    - result = result && (hasAttemptNumber() == other.hasAttemptNumber());
    - if (hasAttemptNumber()) {
    - result = result && (getAttemptNumber()
    - == other.getAttemptNumber());
    + result = result && (hasVertexId() == other.hasVertexId());
    + if (hasVertexId()) {
    + result = result && (getVertexId()
    + == other.getVertexId());
            }
            result = result &&
                getUnknownFields().equals(other.getUnknownFields());
    @@ -4077,102 +3573,74 @@ public final class LlapDaemonProtocolProtos {
            }
            int hash = 41;
            hash = (19 * hash) + getDescriptorForType().hashCode();
    - if (hasFragmentIdentifierString()) {
    - hash = (37 * hash) + FRAGMENT_IDENTIFIER_STRING_FIELD_NUMBER;
    - hash = (53 * hash) + getFragmentIdentifierString().hashCode();
    + if (hasApplicationIdString()) {
    + hash = (37 * hash) + APPLICATION_ID_STRING_FIELD_NUMBER;
    + hash = (53 * hash) + getApplicationIdString().hashCode();
            }
    - if (hasDagName()) {
    - hash = (37 * hash) + DAG_NAME_FIELD_NUMBER;
    - hash = (53 * hash) + getDagName().hashCode();
    + if (hasAppAttemptNumber()) {
    + hash = (37 * hash) + APP_ATTEMPT_NUMBER_FIELD_NUMBER;
    + hash = (53 * hash) + getAppAttemptNumber();
            }
            if (hasDagId()) {
              hash = (37 * hash) + DAG_ID_FIELD_NUMBER;
              hash = (53 * hash) + getDagId();
            }
    - if (hasVertexName()) {
    - hash = (37 * hash) + VERTEX_NAME_FIELD_NUMBER;
    - hash = (53 * hash) + getVertexName().hashCode();
    - }
    - if (hasProcessorDescriptor()) {
    - hash = (37 * hash) + PROCESSOR_DESCRIPTOR_FIELD_NUMBER;
    - hash = (53 * hash) + getProcessorDescriptor().hashCode();
    - }
    - if (getInputSpecsCount() > 0) {
    - hash = (37 * hash) + INPUT_SPECS_FIELD_NUMBER;
    - hash = (53 * hash) + getInputSpecsList().hashCode();
    - }
    - if (getOutputSpecsCount() > 0) {
    - hash = (37 * hash) + OUTPUT_SPECS_FIELD_NUMBER;
    - hash = (53 * hash) + getOutputSpecsList().hashCode();
    - }
    - if (getGroupedInputSpecsCount() > 0) {
    - hash = (37 * hash) + GROUPED_INPUT_SPECS_FIELD_NUMBER;
    - hash = (53 * hash) + getGroupedInputSpecsList().hashCode();
    - }
    - if (hasVertexParallelism()) {
    - hash = (37 * hash) + VERTEX_PARALLELISM_FIELD_NUMBER;
    - hash = (53 * hash) + getVertexParallelism();
    - }
    - if (hasFragmentNumber()) {
    - hash = (37 * hash) + FRAGMENT_NUMBER_FIELD_NUMBER;
    - hash = (53 * hash) + getFragmentNumber();
    - }
    - if (hasAttemptNumber()) {
    - hash = (37 * hash) + ATTEMPT_NUMBER_FIELD_NUMBER;
    - hash = (53 * hash) + getAttemptNumber();
    + if (hasVertexId()) {
    + hash = (37 * hash) + VERTEX_ID_FIELD_NUMBER;
    + hash = (53 * hash) + getVertexId();
            }
            hash = (29 * hash) + getUnknownFields().hashCode();
            memoizedHashCode = hash;
            return hash;
          }

    - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(
    + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier parseFrom(
              com.google.protobuf.ByteString data)
              throws com.google.protobuf.InvalidProtocolBufferException {
            return PARSER.parseFrom(data);
          }
    - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(
    + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier parseFrom(
              com.google.protobuf.ByteString data,
              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
              throws com.google.protobuf.InvalidProtocolBufferException {
            return PARSER.parseFrom(data, extensionRegistry);
          }
    - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(byte[] data)
    + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier parseFrom(byte[] data)
              throws com.google.protobuf.InvalidProtocolBufferException {
            return PARSER.parseFrom(data);
          }
    - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(
    + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier parseFrom(
              byte[] data,
              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
              throws com.google.protobuf.InvalidProtocolBufferException {
            return PARSER.parseFrom(data, extensionRegistry);
          }
    - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(java.io.InputStream input)
    + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier parseFrom(java.io.InputStream input)
              throws java.io.IOException {
            return PARSER.parseFrom(input);
          }
    - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(
    + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier parseFrom(
              java.io.InputStream input,
              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
              throws java.io.IOException {
            return PARSER.parseFrom(input, extensionRegistry);
          }
    - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseDelimitedFrom(java.io.InputStream input)
    + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier parseDelimitedFrom(java.io.InputStream input)
              throws java.io.IOException {
            return PARSER.parseDelimitedFrom(input);
          }
    - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseDelimitedFrom(
    + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier parseDelimitedFrom(
              java.io.InputStream input,
              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
              throws java.io.IOException {
            return PARSER.parseDelimitedFrom(input, extensionRegistry);
          }
    - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(
    + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier parseFrom(
              com.google.protobuf.CodedInputStream input)
              throws java.io.IOException {
            return PARSER.parseFrom(input);
          }
    - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parseFrom(
    + public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier parseFrom(
              com.google.protobuf.CodedInputStream input,
              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
              throws java.io.IOException {
    @@ -4181,7 +3649,7 @@ public final class LlapDaemonProtocolProtos {

          public static Builder newBuilder() { return Builder.create(); }
          public Builder newBuilderForType() { return newBuilder(); }
    - public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto prototype) {
    + public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier prototype) {
            return newBuilder().mergeFrom(prototype);
          }
          public Builder toBuilder() { return newBuilder(this); }
    @@ -4193,24 +3661,24 @@ public final class LlapDaemonProtocolProtos {
            return builder;
          }
          /**
    - * Protobuf type {@code FragmentSpecProto}
    + * Protobuf type {@code VertexIdentifier}
           */
          public static final class Builder extends
              com.google.protobuf.GeneratedMessage.Builder<Builder>
    - implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProtoOrBuilder {
    + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifierOrBuilder {
            public static final com.google.protobuf.Descriptors.Descriptor
                getDescriptor() {
    - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentSpecProto_descriptor;
    + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_VertexIdentifier_descriptor;
            }

            protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
                internalGetFieldAccessorTable() {
    - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentSpecProto_fieldAccessorTable
    + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_VertexIdentifier_fieldAccessorTable
                  .ensureFieldAccessorsInitialized(
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.Builder.class);
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier.Builder.class);
            }

    - // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.newBuilder()
    + // Construct using org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier.newBuilder()
            private Builder() {
              maybeForceBuilderInitialization();
            }
    @@ -4222,10 +3690,6 @@ public final class LlapDaemonProtocolProtos {
            }
            private void maybeForceBuilderInitialization() {
              if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
    - getProcessorDescriptorFieldBuilder();
    - getInputSpecsFieldBuilder();
    - getOutputSpecsFieldBuilder();
    - getGroupedInputSpecsFieldBuilder();
              }
            }
            private static Builder create() {
    @@ -4234,44 +3698,14 @@ public final class LlapDaemonProtocolProtos {

            public Builder clear() {
              super.clear();
    - fragmentIdentifierString_ = "";
    + applicationIdString_ = "";
              bitField0_ = (bitField0_ & ~0x00000001);
    - dagName_ = "";
    + appAttemptNumber_ = 0;
              bitField0_ = (bitField0_ & ~0x00000002);
              dagId_ = 0;
              bitField0_ = (bitField0_ & ~0x00000004);
    - vertexName_ = "";
    + vertexId_ = 0;
              bitField0_ = (bitField0_ & ~0x00000008);
    - if (processorDescriptorBuilder_ == null) {
    - processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
    - } else {
    - processorDescriptorBuilder_.clear();
    - }
    - bitField0_ = (bitField0_ & ~0x00000010);
    - if (inputSpecsBuilder_ == null) {
    - inputSpecs_ = java.util.Collections.emptyList();
    - bitField0_ = (bitField0_ & ~0x00000020);
    - } else {
    - inputSpecsBuilder_.clear();
    - }
    - if (outputSpecsBuilder_ == null) {
    - outputSpecs_ = java.util.Collections.emptyList();
    - bitField0_ = (bitField0_ & ~0x00000040);
    - } else {
    - outputSpecsBuilder_.clear();
    - }
    - if (groupedInputSpecsBuilder_ == null) {
    - groupedInputSpecs_ = java.util.Collections.emptyList();
    - bitField0_ = (bitField0_ & ~0x00000080);
    - } else {
    - groupedInputSpecsBuilder_.clear();
    - }
    - vertexParallelism_ = 0;
    - bitField0_ = (bitField0_ & ~0x00000100);
    - fragmentNumber_ = 0;
    - bitField0_ = (bitField0_ & ~0x00000200);
    - attemptNumber_ = 0;
    - bitField0_ = (bitField0_ & ~0x00000400);
              return this;
            }

    @@ -4281,33 +3715,33 @@ public final class LlapDaemonProtocolProtos {

            public com.google.protobuf.Descriptors.Descriptor
                getDescriptorForType() {
    - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentSpecProto_descriptor;
    + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_VertexIdentifier_descriptor;
            }

    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto getDefaultInstanceForType() {
    - return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.getDefaultInstance();
    + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier getDefaultInstanceForType() {
    + return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier.getDefaultInstance();
            }

    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto build() {
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto result = buildPartial();
    + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier build() {
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier result = buildPartial();
              if (!result.isInitialized()) {
                throw newUninitializedMessageException(result);
              }
              return result;
            }

    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto buildPartial() {
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto(this);
    + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier buildPartial() {
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier(this);
              int from_bitField0_ = bitField0_;
              int to_bitField0_ = 0;
              if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
                to_bitField0_ |= 0x00000001;
              }
    - result.fragmentIdentifierString_ = fragmentIdentifierString_;
    + result.applicationIdString_ = applicationIdString_;
              if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
                to_bitField0_ |= 0x00000002;
              }
    - result.dagName_ = dagName_;
    + result.appAttemptNumber_ = appAttemptNumber_;
              if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
                to_bitField0_ |= 0x00000004;
              }
    @@ -4315,177 +3749,36 @@ public final class LlapDaemonProtocolProtos {
              if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
                to_bitField0_ |= 0x00000008;
              }
    - result.vertexName_ = vertexName_;
    - if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
    - to_bitField0_ |= 0x00000010;
    - }
    - if (processorDescriptorBuilder_ == null) {
    - result.processorDescriptor_ = processorDescriptor_;
    - } else {
    - result.processorDescriptor_ = processorDescriptorBuilder_.build();
    - }
    - if (inputSpecsBuilder_ == null) {
    - if (((bitField0_ & 0x00000020) == 0x00000020)) {
    - inputSpecs_ = java.util.Collections.unmodifiableList(inputSpecs_);
    - bitField0_ = (bitField0_ & ~0x00000020);
    - }
    - result.inputSpecs_ = inputSpecs_;
    - } else {
    - result.inputSpecs_ = inputSpecsBuilder_.build();
    - }
    - if (outputSpecsBuilder_ == null) {
    - if (((bitField0_ & 0x00000040) == 0x00000040)) {
    - outputSpecs_ = java.util.Collections.unmodifiableList(outputSpecs_);
    - bitField0_ = (bitField0_ & ~0x00000040);
    - }
    - result.outputSpecs_ = outputSpecs_;
    - } else {
    - result.outputSpecs_ = outputSpecsBuilder_.build();
    - }
    - if (groupedInputSpecsBuilder_ == null) {
    - if (((bitField0_ & 0x00000080) == 0x00000080)) {
    - groupedInputSpecs_ = java.util.Collections.unmodifiableList(groupedInputSpecs_);
    - bitField0_ = (bitField0_ & ~0x00000080);
    - }
    - result.groupedInputSpecs_ = groupedInputSpecs_;
    - } else {
    - result.groupedInputSpecs_ = groupedInputSpecsBuilder_.build();
    - }
    - if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
    - to_bitField0_ |= 0x00000020;
    - }
    - result.vertexParallelism_ = vertexParallelism_;
    - if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
    - to_bitField0_ |= 0x00000040;
    - }
    - result.fragmentNumber_ = fragmentNumber_;
    - if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
    - to_bitField0_ |= 0x00000080;
    - }
    - result.attemptNumber_ = attemptNumber_;
    + result.vertexId_ = vertexId_;
              result.bitField0_ = to_bitField0_;
              onBuilt();
              return result;
            }

            public Builder mergeFrom(com.google.protobuf.Message other) {
    - if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto) {
    - return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto)other);
    + if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier) {
    + return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier)other);
              } else {
                super.mergeFrom(other);
                return this;
              }
            }

    - public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto other) {
    - if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.getDefaultInstance()) return this;
    - if (other.hasFragmentIdentifierString()) {
    + public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier other) {
    + if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier.getDefaultInstance()) return this;
    + if (other.hasApplicationIdString()) {
                bitField0_ |= 0x00000001;
    - fragmentIdentifierString_ = other.fragmentIdentifierString_;
    + applicationIdString_ = other.applicationIdString_;
                onChanged();
              }
    - if (other.hasDagName()) {
    - bitField0_ |= 0x00000002;
    - dagName_ = other.dagName_;
    - onChanged();
    + if (other.hasAppAttemptNumber()) {
    + setAppAttemptNumber(other.getAppAttemptNumber());
              }
              if (other.hasDagId()) {
                setDagId(other.getDagId());
              }
    - if (other.hasVertexName()) {
    - bitField0_ |= 0x00000008;
    - vertexName_ = other.vertexName_;
    - onChanged();
    - }
    - if (other.hasProcessorDescriptor()) {
    - mergeProcessorDescriptor(other.getProcessorDescriptor());
    - }
    - if (inputSpecsBuilder_ == null) {
    - if (!other.inputSpecs_.isEmpty()) {
    - if (inputSpecs_.isEmpty()) {
    - inputSpecs_ = other.inputSpecs_;
    - bitField0_ = (bitField0_ & ~0x00000020);
    - } else {
    - ensureInputSpecsIsMutable();
    - inputSpecs_.addAll(other.inputSpecs_);
    - }
    - onChanged();
    - }
    - } else {
    - if (!other.inputSpecs_.isEmpty()) {
    - if (inputSpecsBuilder_.isEmpty()) {
    - inputSpecsBuilder_.dispose();
    - inputSpecsBuilder_ = null;
    - inputSpecs_ = other.inputSpecs_;
    - bitField0_ = (bitField0_ & ~0x00000020);
    - inputSpecsBuilder_ =
    - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
    - getInputSpecsFieldBuilder() : null;
    - } else {
    - inputSpecsBuilder_.addAllMessages(other.inputSpecs_);
    - }
    - }
    - }
    - if (outputSpecsBuilder_ == null) {
    - if (!other.outputSpecs_.isEmpty()) {
    - if (outputSpecs_.isEmpty()) {
    - outputSpecs_ = other.outputSpecs_;
    - bitField0_ = (bitField0_ & ~0x00000040);
    - } else {
    - ensureOutputSpecsIsMutable();
    - outputSpecs_.addAll(other.outputSpecs_);
    - }
    - onChanged();
    - }
    - } else {
    - if (!other.outputSpecs_.isEmpty()) {
    - if (outputSpecsBuilder_.isEmpty()) {
    - outputSpecsBuilder_.dispose();
    - outputSpecsBuilder_ = null;
    - outputSpecs_ = other.outputSpecs_;
    - bitField0_ = (bitField0_ & ~0x00000040);
    - outputSpecsBuilder_ =
    - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
    - getOutputSpecsFieldBuilder() : null;
    - } else {
    - outputSpecsBuilder_.addAllMessages(other.outputSpecs_);
    - }
    - }
    - }
    - if (groupedInputSpecsBuilder_ == null) {
    - if (!other.groupedInputSpecs_.isEmpty()) {
    - if (groupedInputSpecs_.isEmpty()) {
    - groupedInputSpecs_ = other.groupedInputSpecs_;
    - bitField0_ = (bitField0_ & ~0x00000080);
    - } else {
    - ensureGroupedInputSpecsIsMutable();
    - groupedInputSpecs_.addAll(other.groupedInputSpecs_);
    - }
    - onChanged();
    - }
    - } else {
    - if (!other.groupedInputSpecs_.isEmpty()) {
    - if (groupedInputSpecsBuilder_.isEmpty()) {
    - groupedInputSpecsBuilder_.dispose();
    - groupedInputSpecsBuilder_ = null;
    - groupedInputSpecs_ = other.groupedInputSpecs_;
    - bitField0_ = (bitField0_ & ~0x00000080);
    - groupedInputSpecsBuilder_ =
    - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
    - getGroupedInputSpecsFieldBuilder() : null;
    - } else {
    - groupedInputSpecsBuilder_.addAllMessages(other.groupedInputSpecs_);
    - }
    - }
    - }
    - if (other.hasVertexParallelism()) {
    - setVertexParallelism(other.getVertexParallelism());
    - }
    - if (other.hasFragmentNumber()) {
    - setFragmentNumber(other.getFragmentNumber());
    - }
    - if (other.hasAttemptNumber()) {
    - setAttemptNumber(other.getAttemptNumber());
    + if (other.hasVertexId()) {
    + setVertexId(other.getVertexId());
              }
              this.mergeUnknownFields(other.getUnknownFields());
              return this;
    @@ -4499,11 +3792,11 @@ public final class LlapDaemonProtocolProtos {
                com.google.protobuf.CodedInputStream input,
                com.google.protobuf.ExtensionRegistryLite extensionRegistry)
                throws java.io.IOException {
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto parsedMessage = null;
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier parsedMessage = null;
              try {
                parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
              } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    - parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto) e.getUnfinishedMessage();
    + parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier) e.getUnfinishedMessage();
                throw e;
              } finally {
                if (parsedMessage != null) {
    @@ -4514,170 +3807,129 @@ public final class LlapDaemonProtocolProtos {
            }
            private int bitField0_;

    - // optional string fragment_identifier_string = 1;
    - private java.lang.Object fragmentIdentifierString_ = "";
    + // optional string application_id_string = 1;
    + private java.lang.Object applicationIdString_ = "";
            /**
    - * <code>optional string fragment_identifier_string = 1;</code>
    + * <code>optional string application_id_string = 1;</code>
             */
    - public boolean hasFragmentIdentifierString() {
    + public boolean hasApplicationIdString() {
              return ((bitField0_ & 0x00000001) == 0x00000001);
            }
            /**
    - * <code>optional string fragment_identifier_string = 1;</code>
    + * <code>optional string application_id_string = 1;</code>
             */
    - public java.lang.String getFragmentIdentifierString() {
    - java.lang.Object ref = fragmentIdentifierString_;
    + public java.lang.String getApplicationIdString() {
    + java.lang.Object ref = applicationIdString_;
              if (!(ref instanceof java.lang.String)) {
                java.lang.String s = ((com.google.protobuf.ByteString) ref)
                    .toStringUtf8();
    - fragmentIdentifierString_ = s;
    + applicationIdString_ = s;
                return s;
              } else {
                return (java.lang.String) ref;
              }
            }
            /**
    - * <code>optional string fragment_identifier_string = 1;</code>
    + * <code>optional string application_id_string = 1;</code>
             */
            public com.google.protobuf.ByteString
    - getFragmentIdentifierStringBytes() {
    - java.lang.Object ref = fragmentIdentifierString_;
    + getApplicationIdStringBytes() {
    + java.lang.Object ref = applicationIdString_;
              if (ref instanceof String) {
                com.google.protobuf.ByteString b =
                    com.google.protobuf.ByteString.copyFromUtf8(
                        (java.lang.String) ref);
    - fragmentIdentifierString_ = b;
    + applicationIdString_ = b;
                return b;
              } else {
                return (com.google.protobuf.ByteString) ref;
              }
            }
            /**
    - * <code>optional string fragment_identifier_string = 1;</code>
    + * <code>optional string application_id_string = 1;</code>
             */
    - public Builder setFragmentIdentifierString(
    + public Builder setApplicationIdString(
                java.lang.String value) {
              if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
    - fragmentIdentifierString_ = value;
    + applicationIdString_ = value;
              onChanged();
              return this;
            }
            /**
    - * <code>optional string fragment_identifier_string = 1;</code>
    + * <code>optional string application_id_string = 1;</code>
             */
    - public Builder clearFragmentIdentifierString() {
    + public Builder clearApplicationIdString() {
              bitField0_ = (bitField0_ & ~0x00000001);
    - fragmentIdentifierString_ = getDefaultInstance().getFragmentIdentifierString();
    + applicationIdString_ = getDefaultInstance().getApplicationIdString();
              onChanged();
              return this;
            }
            /**
    - * <code>optional string fragment_identifier_string = 1;</code>
    + * <code>optional string application_id_string = 1;</code>
             */
    - public Builder setFragmentIdentifierStringBytes(
    + public Builder setApplicationIdStringBytes(
                com.google.protobuf.ByteString value) {
              if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
    - fragmentIdentifierString_ = value;
    + applicationIdString_ = value;
              onChanged();
              return this;
            }

    - // optional string dag_name = 2;
    - private java.lang.Object dagName_ = "";
    + // optional int32 app_attempt_number = 2;
    + private int appAttemptNumber_ ;
            /**
    - * <code>optional string dag_name = 2;</code>
    + * <code>optional int32 app_attempt_number = 2;</code>
             */
    - public boolean hasDagName() {
    + public boolean hasAppAttemptNumber() {
              return ((bitField0_ & 0x00000002) == 0x00000002);
            }
            /**
    - * <code>optional string dag_name = 2;</code>
    + * <code>optional int32 app_attempt_number = 2;</code>
             */
    - public java.lang.String getDagName() {
    - java.lang.Object ref = dagName_;
    - if (!(ref instanceof java.lang.String)) {
    - java.lang.String s = ((com.google.protobuf.ByteString) ref)
    - .toStringUtf8();
    - dagName_ = s;
    - return s;
    - } else {
    - return (java.lang.String) ref;
    - }
    + public int getAppAttemptNumber() {
    + return appAttemptNumber_;
            }
            /**
    - * <code>optional string dag_name = 2;</code>
    + * <code>optional int32 app_attempt_number = 2;</code>
             */
    - public com.google.protobuf.ByteString
    - getDagNameBytes() {
    - java.lang.Object ref = dagName_;
    - if (ref instanceof String) {
    - com.google.protobuf.ByteString b =
    - com.google.protobuf.ByteString.copyFromUtf8(
    - (java.lang.String) ref);
    - dagName_ = b;
    - return b;
    - } else {
    - return (com.google.protobuf.ByteString) ref;
    - }
    - }
    - /**
    - * <code>optional string dag_name = 2;</code>
    - */
    - public Builder setDagName(
    - java.lang.String value) {
    - if (value == null) {
    - throw new NullPointerException();
    - }
    - bitField0_ |= 0x00000002;
    - dagName_ = value;
    + public Builder setAppAttemptNumber(int value) {
    + bitField0_ |= 0x00000002;
    + appAttemptNumber_ = value;
              onChanged();
              return this;
            }
            /**
    - * <code>optional string dag_name = 2;</code>
    + * <code>optional int32 app_attempt_number = 2;</code>
             */
    - public Builder clearDagName() {
    + public Builder clearAppAttemptNumber() {
              bitField0_ = (bitField0_ & ~0x00000002);
    - dagName_ = getDefaultInstance().getDagName();
    - onChanged();
    - return this;
    - }
    - /**
    - * <code>optional string dag_name = 2;</code>
    - */
    - public Builder setDagNameBytes(
    - com.google.protobuf.ByteString value) {
    - if (value == null) {
    - throw new NullPointerException();
    - }
    - bitField0_ |= 0x00000002;
    - dagName_ = value;
    + appAttemptNumber_ = 0;
              onChanged();
              return this;
            }

    - // optional int32 dag_id = 11;
    + // optional int32 dag_id = 3;
            private int dagId_ ;
            /**
    - * <code>optional int32 dag_id = 11;</code>
    + * <code>optional int32 dag_id = 3;</code>
             */
            public boolean hasDagId() {
              return ((bitField0_ & 0x00000004) == 0x00000004);
            }
            /**
    - * <code>optional int32 dag_id = 11;</code>
    + * <code>optional int32 dag_id = 3;</code>
             */
            public int getDagId() {
              return dagId_;
            }
            /**
    - * <code>optional int32 dag_id = 11;</code>
    + * <code>optional int32 dag_id = 3;</code>
             */
            public Builder setDagId(int value) {
              bitField0_ |= 0x00000004;
    @@ -4686,7 +3938,7 @@ public final class LlapDaemonProtocolProtos {
              return this;
            }
            /**
    - * <code>optional int32 dag_id = 11;</code>
    + * <code>optional int32 dag_id = 3;</code>
             */
            public Builder clearDagId() {
              bitField0_ = (bitField0_ & ~0x00000004);
    @@ -4695,1025 +3947,3552 @@ public final class LlapDaemonProtocolProtos {
              return this;
            }

    - // optional string vertex_name = 3;
    - private java.lang.Object vertexName_ = "";
    + // optional int32 vertex_id = 4;
    + private int vertexId_ ;
            /**
    - * <code>optional string vertex_name = 3;</code>
    + * <code>optional int32 vertex_id = 4;</code>
             */
    - public boolean hasVertexName() {
    + public boolean hasVertexId() {
              return ((bitField0_ & 0x00000008) == 0x00000008);
            }
            /**
    - * <code>optional string vertex_name = 3;</code>
    - */
    - public java.lang.String getVertexName() {
    - java.lang.Object ref = vertexName_;
    - if (!(ref instanceof java.lang.String)) {
    - java.lang.String s = ((com.google.protobuf.ByteString) ref)
    - .toStringUtf8();
    - vertexName_ = s;
    - return s;
    - } else {
    - return (java.lang.String) ref;
    - }
    - }
    - /**
    - * <code>optional string vertex_name = 3;</code>
    + * <code>optional int32 vertex_id = 4;</code>
             */
    - public com.google.protobuf.ByteString
    - getVertexNameBytes() {
    - java.lang.Object ref = vertexName_;
    - if (ref instanceof String) {
    - com.google.protobuf.ByteString b =
    - com.google.protobuf.ByteString.copyFromUtf8(
    - (java.lang.String) ref);
    - vertexName_ = b;
    - return b;
    - } else {
    - return (com.google.protobuf.ByteString) ref;
    - }
    + public int getVertexId() {
    + return vertexId_;
            }
            /**
    - * <code>optional string vertex_name = 3;</code>
    + * <code>optional int32 vertex_id = 4;</code>
             */
    - public Builder setVertexName(
    - java.lang.String value) {
    - if (value == null) {
    - throw new NullPointerException();
    - }
    - bitField0_ |= 0x00000008;
    - vertexName_ = value;
    + public Builder setVertexId(int value) {
    + bitField0_ |= 0x00000008;
    + vertexId_ = value;
              onChanged();
              return this;
            }
            /**
    - * <code>optional string vertex_name = 3;</code>
    + * <code>optional int32 vertex_id = 4;</code>
             */
    - public Builder clearVertexName() {
    + public Builder clearVertexId() {
              bitField0_ = (bitField0_ & ~0x00000008);
    - vertexName_ = getDefaultInstance().getVertexName();
    + vertexId_ = 0;
              onChanged();
              return this;
            }
    - /**
    - * <code>optional string vertex_name = 3;</code>
    - */
    - public Builder setVertexNameBytes(
    - com.google.protobuf.ByteString value) {
    - if (value == null) {
    - throw new NullPointerException();
    +
    + // @@protoc_insertion_point(builder_scope:VertexIdentifier)
    + }
    +
    + static {
    + defaultInstance = new VertexIdentifier(true);
    + defaultInstance.initFields();
    + }
    +
    + // @@protoc_insertion_point(class_scope:VertexIdentifier)
        }
    - bitField0_ |= 0x00000008;
    - vertexName_ = value;
    - onChanged();
    - return this;
    - }

    - // optional .EntityDescriptorProto processor_descriptor = 4;
    - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
    - private com.google.protobuf.SingleFieldBuilder<
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder> processorDescriptorBuilder_;
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - public boolean hasProcessorDescriptor() {
    - return ((bitField0_ & 0x00000010) == 0x00000010);
    - }
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getProcessorDescriptor() {
    - if (processorDescriptorBuilder_ == null) {
    - return processorDescriptor_;
    - } else {
    - return processorDescriptorBuilder_.getMessage();
    - }
    - }
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - public Builder setProcessorDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto value) {
    - if (processorDescriptorBuilder_ == null) {
    - if (value == null) {
    - throw new NullPointerException();
    - }
    - processorDescriptor_ = value;
    - onChanged();
    - } else {
    - processorDescriptorBuilder_.setMessage(value);
    - }
    - bitField0_ |= 0x00000010;
    - return this;
    - }
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - public Builder setProcessorDescriptor(
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder builderForValue) {
    - if (processorDescriptorBuilder_ == null) {
    - processorDescriptor_ = builderForValue.build();
    - onChanged();
    - } else {
    - processorDescriptorBuilder_.setMessage(builderForValue.build());
    - }
    - bitField0_ |= 0x00000010;
    - return this;
    - }
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - public Builder mergeProcessorDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto value) {
    - if (processorDescriptorBuilder_ == null) {
    - if (((bitField0_ & 0x00000010) == 0x00000010) &&
    - processorDescriptor_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance()) {
    - processorDescriptor_ =
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder(processorDescriptor_).mergeFrom(value).buildPartial();
    - } else {
    - processorDescriptor_ = value;
    - }
    - onChanged();
    - } else {
    - processorDescriptorBuilder_.mergeFrom(value);
    - }
    - bitField0_ |= 0x00000010;
    - return this;
    - }
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - public Builder clearProcessorDescriptor() {
    - if (processorDescriptorBuilder_ == null) {
    - processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance();
    - onChanged();
    - } else {
    - processorDescriptorBuilder_.clear();
    - }
    - bitField0_ = (bitField0_ & ~0x00000010);
    - return this;
    - }
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder getProcessorDescriptorBuilder() {
    - bitField0_ |= 0x00000010;
    - onChanged();
    - return getProcessorDescriptorFieldBuilder().getBuilder();
    - }
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getProcessorDescriptorOrBuilder() {
    - if (processorDescriptorBuilder_ != null) {
    - return processorDescriptorBuilder_.getMessageOrBuilder();
    - } else {
    - return processorDescriptor_;
    - }
    - }
    - /**
    - * <code>optional .EntityDescriptorProto processor_descriptor = 4;</code>
    - */
    - private com.google.protobuf.SingleFieldBuilder<
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder>
    - getProcessorDescriptorFieldBuilder() {
    - if (processorDescriptorBuilder_ == null) {
    - processorDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder<
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder>(
    - processorDescriptor_,
    - getParentForChildren(),
    - isClean());
    - processorDescriptor_ = null;
    - }
    - return processorDescriptorBuilder_;
    - }
    + public interface SignableVertexSpecOrBuilder
    + extends com.google.protobuf.MessageOrBuilder {

    - // repeated .IOSpecProto input_specs = 5;
    - private java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> inputSpecs_ =
    - java.util.Collections.emptyList();
    - private void ensureInputSpecsIsMutable() {
    - if (!((bitField0_ & 0x00000020) == 0x00000020)) {
    - inputSpecs_ = new java.util.ArrayList<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>(inputSpecs_);
    - bitField0_ |= 0x00000020;
    - }
    - }
    + // optional string user = 1;
    + /**
    + * <code>optional string user = 1;</code>
    + */
    + boolean hasUser();
    + /**
    + * <code>optional string user = 1;</code>
    + */
    + java.lang.String getUser();
    + /**
    + * <code>optional string user = 1;</code>
    + */
    + com.google.protobuf.ByteString
    + getUserBytes();

    - private com.google.protobuf.RepeatedFieldBuilder<
    - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder> inputSpecsBuilder_;
    + // optional int64 signatureKeyId = 2;
    + /**
    + * <code>optional int64 signatureKeyId = 2;</code>
    + */
    + boolean hasSignatureKeyId();
    + /**
    + * <code>optional int64 signatureKeyId = 2;</code>
    + */
    + long getSignatureKeyId();

    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - public java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto> getInputSpecsList() {
    - if (inputSpecsBuilder_ == null) {
    - return java.util.Collections.unmodifiableList(inputSpecs_);
    - } else {
    - return inputSpecsBuilder_.getMessageList();
    - }
    - }
    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - public int getInputSpecsCount() {
    - if (inputSpecsBuilder_ == null) {
    - return inputSpecs_.size();
    - } else {
    - return inputSpecsBuilder_.getCount();
    - }
    - }
    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getInputSpecs(int index) {
    - if (inputSpecsBuilder_ == null) {
    - return inputSpecs_.get(index);
    - } else {
    - return inputSpecsBuilder_.getMessage(index);
    - }
    - }
    - /**
    - * <code>repeated .IOSpecProto input_specs = 5;</code>
    - */
    - public Builder setInputSpecs(
    - int index, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto value) {
    - if (inputSpecsBuilder_ == null) {
    + // optional .VertexIdentifier vertexIdentifier = 3;
    + /**
    + * <code>optional .VertexIdentifier vertexIdentifier = 3;</code>
    + */
    + boolean hasVertexIdentifier();
    + /**
    + * <code>optional .VertexIdentifier vertexIdentifier = 3;</code>
    + */
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier getVertexIdentifier();
    + /**
    + * <code>optional .VertexIdentifier vertexIdentifier = 3;</code>
    + */
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifierOrBuilder getVertexIdentifierOrBuilder();
    +
    + // optional string dag_name = 4;
    + /**
    + * <code>optional string dag_name = 4;</code>
    + *
    + * <pre>
    + * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
    + * </pre>
    + */
    + boolean hasDagName();
    + /**
    + * <code>optional string dag_name = 4;</code>
    + *
    + * <pre>
    + * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
    + * </pre>
    + */
    + java.lang.String getDagName();
    + /**
    + * <code>optional string dag_name = 4;</code>
    + *
    + * <pre>
    + * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
    + * </pre>
    + */
    + com.google.protobuf.ByteString
    + getDagNameBytes();
    +
    + // optional string vertex_name = 5;
    + /**
    + * <code>optional string vertex_name = 5;</code>
    + */
    + boolean hasVertexName();
    + /**
    + * <code>optional string vertex_name = 5;</code>
    + */
    + java.lang.String getVertexName();
    + /**
    + * <code>optional string vertex_name = 5;</code>
    + */
    + com.google.protobuf.ByteString
    + getVertexNameBytes();
    +
    + // optional string token_identifier = 6;
    + /**
    + * <code>optional string token_identifier = 6;</code>
    + *
    + * <pre>
    + * The core vertex stuff
    + * </pre>
    + */
    + boolean hasTokenIdentifier();
    + /**
    + * <code>optional string token_identifier = 6;</code>
    + *
    + * <pre>
    + * The core vertex stuff
    + * </pre>
    + */
    + java.lang.String getTokenIdentifier();
    + /**
    + * <code>optional string token_identifier = 6;</code>
    + *
    + * <pre>
    + * The core vertex stuff
    + * </pre>
    + */
    + com.google.protobuf.ByteString
    + getTokenIdentifierBytes();
    +
    + // optional .EntityDescriptorProto processor_descriptor = 7;
    + /**
    + * <code>optional .EntityDescriptorProto processor_descriptor = 7;</code>
    + */
    + boolean hasProcessorDescriptor();
    + /**
    + * <code>optional .EntityDescriptorProto processor_descriptor = 7;</code>
    + */
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getProcessorDescriptor();
    + /**
    + * <code>optional .EntityDescriptorProto processor_descriptor = 7;</code>
    + */
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getProcessorDescriptorOrBuilder();
    +
    + // repeated .IOSpecProto input_specs = 8;
    + /**
    + * <code>repeated .IOSpecProto input_specs = 8;</code>
    + */
    + java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>
    + getInputSpecsList();
    + /**
    + * <code>repeated .IOSpecProto input_specs = 8;</code>
    + */
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getInputSpecs(int index);
    + /**
    + * <code>repeated .IOSpecProto input_specs = 8;</code>
    + */
    + int getInputSpecsCount();
    + /**
    + * <code>repeated .IOSpecProto input_specs = 8;</code>
    + */
    + java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
    + getInputSpecsOrBuilderList();
    + /**
    + * <code>repeated .IOSpecProto input_specs = 8;</code>
    + */
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getInputSpecsOrBuilder(
    + int index);
    +
    + // repeated .IOSpecProto output_specs = 9;
    + /**
    + * <code>repeated .IOSpecProto output_specs = 9;</code>
    + */
    + java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto>
    + getOutputSpecsList();
    + /**
    + * <code>repeated .IOSpecProto output_specs = 9;</code>
    + */
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getOutputSpecs(int index);
    + /**
    + * <code>repeated .IOSpecProto output_specs = 9;</code>
    + */
    + int getOutputSpecsCount();
    + /**
    + * <code>repeated .IOSpecProto output_specs = 9;</code>
    + */
    + java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>
    + getOutputSpecsOrBuilderList();
    + /**
    + * <code>repeated .IOSpecProto output_specs = 9;</code>
    + */
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getOutputSpecsOrBuilder(
    + int index);
    +
    + // repeated .GroupInputSpecProto grouped_input_specs = 10;
    + /**
    + * <code>repeated .GroupInputSpecProto grouped_input_specs = 10;</code>
    + */
    + java.util.List<org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto>
    + getGroupedInputSpecsList();
    + /**
    + * <code>repeated .GroupInputSpecProto grouped_input_specs = 10;</code>
    + */
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto getGroupedInputSpecs(int index);
    + /**
    + * <code>repeated .GroupInputSpecProto grouped_input_specs = 10;</code>
    + */
    + int getGroupedInputSpecsCount();
    + /**
    + * <code>repeated .GroupInputSpecProto grouped_input_specs = 10;</code>
    + */
    + java.util.List<? extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder>
    + getGroupedInputSpecsOrBuilderList();
    + /**
    + * <code>repeated .GroupInputSpecProto grouped_input_specs = 10;</code>
    + */
    + org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder getGroupedInputSpecsOrBuilder(
    + int index);
    +
    + // optional int32 vertex_parallelism = 11;
    + /**
    + * <code>optional int32 vertex_parallelism = 11;</code>
    + *
    + * <pre>
    + * An internal field required for Tez.
    + * </pre>
    + */
    + boolean hasVertexParallelism();
    + /**
    + * <code>optional int32 vertex_parallelism = 11;</code>
    + *
    + * <pre>
    + * An internal field required for Tez.
    + * </pre>
    + */
    + int getVertexParallelism();
    + }
    + /**
    + * Protobuf type {@code SignableVertexSpec}
    + *
    + * <pre>
    + * The part of SubmitWork that can be signed
    + * </pre>
    + */
    + public static final class SignableVertexSpec extends
    + com.google.protobuf.GeneratedMessage
    + implements SignableVertexSpecOrBuilder {
    + // Use SignableVertexSpec.newBuilder() to construct.
    + private SignableVertexSpec(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
    + super(builder);
    + this.unknownFields = builder.getUnknownFields();
    + }
    + private SignableVertexSpec(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
    +
    + private static final SignableVertexSpec defaultInstance;
    + public static SignableVertexSpec getDefaultInstance() {
    + return defaultInstance;
    + }
    +
    + public SignableVertexSpec getDefaultInstanceForType() {
    + return defaultInstance;
    + }
    +
    + private final com.google.protobuf.UnknownFieldSet unknownFields;
    + @java.lang.Override
    + public final com.google.protobuf.UnknownFieldSet
    + getUnknownFields() {
    + return this.unknownFields;
    + }
    + private SignableVertexSpec(
    + com.google.protobuf.CodedInputStream input,
    + com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    + throws com.google.protobuf.InvalidProtocolBufferException {
    + initFields();
    + int mutable_bitField0_ = 0;
    + c

    <TRUNCATED>
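
[Editor's note] A minimal, hypothetical sketch (not part of the commit) of how the renamed VertexIdentifier message would be populated through the builder methods introduced in the hunks above (setApplicationIdString, setAppAttemptNumber, setDagId, setVertexId). All field values below are placeholders chosen for illustration.

  import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier;

  public class VertexIdentifierSketch {
    public static void main(String[] args) {
      // application_id_string replaces the old fragment_identifier_string field 1;
      // app_attempt_number and vertex_id are new int32 fields; dag_id moves to field 3.
      VertexIdentifier vertex = VertexIdentifier.newBuilder()
          .setApplicationIdString("application_1462300000000_0001") // placeholder app id
          .setAppAttemptNumber(1)
          .setDagId(3)
          .setVertexId(4)
          .build();
      System.out.println(vertex.getApplicationIdString() + " dag=" + vertex.getDagId());
    }
  }
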
  • Spena at May 6, 2016 at 8:42 pm
    HIVE-13445 : LLAP: token should encode application and cluster ids (Sergey Shelukhin, reviewed by Siddharth Seth)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/868e5e14
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/868e5e14
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/868e5e14

    Branch: refs/heads/java8
    Commit: 868e5e141856ce75af48d854d9e3eb13372d11f4
    Parents: b621827
    Author: Sergey Shelukhin <sershe@apache.org>
    Authored: Tue May 3 12:01:32 2016 -0700
    Committer: Sergey Shelukhin <sershe@apache.org>
    Committed: Tue May 3 13:38:03 2016 -0700

    ----------------------------------------------------------------------
      .../daemon/rpc/LlapDaemonProtocolProtos.java | 209 +++++++++++++++++--
      .../org/apache/hadoop/hive/llap/DaemonId.java | 41 ++++
      .../hive/llap/security/LlapTokenIdentifier.java | 39 +++-
      .../hive/llap/security/LlapTokenProvider.java | 2 +-
      .../src/protobuf/LlapDaemonProtocol.proto | 1 +
      .../hive/llap/daemon/ContainerRunner.java | 9 +-
      .../llap/daemon/impl/ContainerRunnerImpl.java | 47 +++--
      .../hive/llap/daemon/impl/LlapDaemon.java | 52 ++++-
      .../daemon/impl/LlapProtocolServerImpl.java | 41 ++--
      .../hive/llap/daemon/impl/LlapTokenChecker.java | 137 ++++++++++++
      .../hadoop/hive/llap/daemon/impl/QueryInfo.java | 17 +-
      .../hive/llap/daemon/impl/QueryTracker.java | 85 +++++---
      .../hadoop/hive/llap/daemon/impl/Scheduler.java | 2 +
      .../llap/daemon/impl/TaskExecutorService.java | 9 +
      .../hive/llap/security/LlapSecurityHelper.java | 15 +-
      .../hive/llap/security/SecretManager.java | 19 +-
      .../hive/llap/daemon/MiniLlapCluster.java | 2 +-
      .../daemon/impl/TaskExecutorTestHelpers.java | 2 +-
      .../impl/TestLlapDaemonProtocolServerImpl.java | 2 +-
      .../llap/daemon/impl/TestLlapTokenChecker.java | 96 +++++++++
      .../hive/ql/exec/tez/TezSessionState.java | 3 +-
      21 files changed, 702 insertions(+), 128 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
    ----------------------------------------------------------------------
    diff --git a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
    index 4ab7b32..820f6be 100644
    --- a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
    +++ b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
    @@ -12821,6 +12821,21 @@ public final class LlapDaemonProtocolProtos {

        public interface GetTokenRequestProtoOrBuilder
            extends com.google.protobuf.MessageOrBuilder {
    +
    + // optional string app_id = 1;
    + /**
    + * <code>optional string app_id = 1;</code>
    + */
    + boolean hasAppId();
    + /**
    + * <code>optional string app_id = 1;</code>
    + */
    + java.lang.String getAppId();
    + /**
    + * <code>optional string app_id = 1;</code>
    + */
    + com.google.protobuf.ByteString
    + getAppIdBytes();
        }
        /**
         * Protobuf type {@code GetTokenRequestProto}
    @@ -12855,6 +12870,7 @@ public final class LlapDaemonProtocolProtos {
              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
              throws com.google.protobuf.InvalidProtocolBufferException {
            initFields();
    + int mutable_bitField0_ = 0;
            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
                com.google.protobuf.UnknownFieldSet.newBuilder();
            try {
    @@ -12872,6 +12888,11 @@ public final class LlapDaemonProtocolProtos {
                    }
                    break;
                  }
    + case 10: {
    + bitField0_ |= 0x00000001;
    + appId_ = input.readBytes();
    + break;
    + }
                }
              }
            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    @@ -12911,7 +12932,52 @@ public final class LlapDaemonProtocolProtos {
            return PARSER;
          }

    + private int bitField0_;
    + // optional string app_id = 1;
    + public static final int APP_ID_FIELD_NUMBER = 1;
    + private java.lang.Object appId_;
    + /**
    + * <code>optional string app_id = 1;</code>
    + */
    + public boolean hasAppId() {
    + return ((bitField0_ & 0x00000001) == 0x00000001);
    + }
    + /**
    + * <code>optional string app_id = 1;</code>
    + */
    + public java.lang.String getAppId() {
    + java.lang.Object ref = appId_;
    + if (ref instanceof java.lang.String) {
    + return (java.lang.String) ref;
    + } else {
    + com.google.protobuf.ByteString bs =
    + (com.google.protobuf.ByteString) ref;
    + java.lang.String s = bs.toStringUtf8();
    + if (bs.isValidUtf8()) {
    + appId_ = s;
    + }
    + return s;
    + }
    + }
    + /**
    + * <code>optional string app_id = 1;</code>
    + */
    + public com.google.protobuf.ByteString
    + getAppIdBytes() {
    + java.lang.Object ref = appId_;
    + if (ref instanceof java.lang.String) {
    + com.google.protobuf.ByteString b =
    + com.google.protobuf.ByteString.copyFromUtf8(
    + (java.lang.String) ref);
    + appId_ = b;
    + return b;
    + } else {
    + return (com.google.protobuf.ByteString) ref;
    + }
    + }
    +
          private void initFields() {
    + appId_ = "";
          }
          private byte memoizedIsInitialized = -1;
          public final boolean isInitialized() {
    @@ -12925,6 +12991,9 @@ public final class LlapDaemonProtocolProtos {
          public void writeTo(com.google.protobuf.CodedOutputStream output)
                              throws java.io.IOException {
            getSerializedSize();
    + if (((bitField0_ & 0x00000001) == 0x00000001)) {
    + output.writeBytes(1, getAppIdBytes());
    + }
            getUnknownFields().writeTo(output);
          }

    @@ -12934,6 +13003,10 @@ public final class LlapDaemonProtocolProtos {
            if (size != -1) return size;

            size = 0;
    + if (((bitField0_ & 0x00000001) == 0x00000001)) {
    + size += com.google.protobuf.CodedOutputStream
    + .computeBytesSize(1, getAppIdBytes());
    + }
            size += getUnknownFields().getSerializedSize();
            memoizedSerializedSize = size;
            return size;
    @@ -12957,6 +13030,11 @@ public final class LlapDaemonProtocolProtos {
            org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto) obj;

            boolean result = true;
    + result = result && (hasAppId() == other.hasAppId());
    + if (hasAppId()) {
    + result = result && getAppId()
    + .equals(other.getAppId());
    + }
            result = result &&
                getUnknownFields().equals(other.getUnknownFields());
            return result;
    @@ -12970,6 +13048,10 @@ public final class LlapDaemonProtocolProtos {
            }
            int hash = 41;
            hash = (19 * hash) + getDescriptorForType().hashCode();
    + if (hasAppId()) {
    + hash = (37 * hash) + APP_ID_FIELD_NUMBER;
    + hash = (53 * hash) + getAppId().hashCode();
    + }
            hash = (29 * hash) + getUnknownFields().hashCode();
            memoizedHashCode = hash;
            return hash;
    @@ -13079,6 +13161,8 @@ public final class LlapDaemonProtocolProtos {

            public Builder clear() {
              super.clear();
    + appId_ = "";
    + bitField0_ = (bitField0_ & ~0x00000001);
              return this;
            }

    @@ -13105,6 +13189,13 @@ public final class LlapDaemonProtocolProtos {

            public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto buildPartial() {
              org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto(this);
    + int from_bitField0_ = bitField0_;
    + int to_bitField0_ = 0;
    + if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
    + to_bitField0_ |= 0x00000001;
    + }
    + result.appId_ = appId_;
    + result.bitField0_ = to_bitField0_;
              onBuilt();
              return result;
            }
    @@ -13120,6 +13211,11 @@ public final class LlapDaemonProtocolProtos {

            public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto other) {
              if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto.getDefaultInstance()) return this;
    + if (other.hasAppId()) {
    + bitField0_ |= 0x00000001;
    + appId_ = other.appId_;
    + onChanged();
    + }
              this.mergeUnknownFields(other.getUnknownFields());
              return this;
            }
    @@ -13145,6 +13241,81 @@ public final class LlapDaemonProtocolProtos {
              }
              return this;
            }
    + private int bitField0_;
    +
    + // optional string app_id = 1;
    + private java.lang.Object appId_ = "";
    + /**
    + * <code>optional string app_id = 1;</code>
    + */
    + public boolean hasAppId() {
    + return ((bitField0_ & 0x00000001) == 0x00000001);
    + }
    + /**
    + * <code>optional string app_id = 1;</code>
    + */
    + public java.lang.String getAppId() {
    + java.lang.Object ref = appId_;
    + if (!(ref instanceof java.lang.String)) {
    + java.lang.String s = ((com.google.protobuf.ByteString) ref)
    + .toStringUtf8();
    + appId_ = s;
    + return s;
    + } else {
    + return (java.lang.String) ref;
    + }
    + }
    + /**
    + * <code>optional string app_id = 1;</code>
    + */
    + public com.google.protobuf.ByteString
    + getAppIdBytes() {
    + java.lang.Object ref = appId_;
    + if (ref instanceof String) {
    + com.google.protobuf.ByteString b =
    + com.google.protobuf.ByteString.copyFromUtf8(
    + (java.lang.String) ref);
    + appId_ = b;
    + return b;
    + } else {
    + return (com.google.protobuf.ByteString) ref;
    + }
    + }
    + /**
    + * <code>optional string app_id = 1;</code>
    + */
    + public Builder setAppId(
    + java.lang.String value) {
    + if (value == null) {
    + throw new NullPointerException();
    + }
    + bitField0_ |= 0x00000001;
    + appId_ = value;
    + onChanged();
    + return this;
    + }
    + /**
    + * <code>optional string app_id = 1;</code>
    + */
    + public Builder clearAppId() {
    + bitField0_ = (bitField0_ & ~0x00000001);
    + appId_ = getDefaultInstance().getAppId();
    + onChanged();
    + return this;
    + }
    + /**
    + * <code>optional string app_id = 1;</code>
    + */
    + public Builder setAppIdBytes(
    + com.google.protobuf.ByteString value) {
    + if (value == null) {
    + throw new NullPointerException();
    + }
    + bitField0_ |= 0x00000001;
    + appId_ = value;
    + onChanged();
    + return this;
    + }

            // @@protoc_insertion_point(builder_scope:GetTokenRequestProto)
          }
    @@ -14414,24 +14585,24 @@ public final class LlapDaemonProtocolProtos {
            "RequestProto\022/\n\020query_identifier\030\001 \001(\0132\025" +
            ".QueryIdentifierProto\022\"\n\032fragment_identi" +
            "fier_string\030\002 \001(\t\" \n\036TerminateFragmentRe" +
    - "sponseProto\"\026\n\024GetTokenRequestProto\"&\n\025G",
    - "etTokenResponseProto\022\r\n\005token\030\001 \001(\014*2\n\020S" +
    - "ourceStateProto\022\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RU" +
    - "NNING\020\002*E\n\024SubmissionStateProto\022\014\n\010ACCEP" +
    - "TED\020\001\022\014\n\010REJECTED\020\002\022\021\n\rEVICTED_OTHER\020\0032\316" +
    - "\002\n\022LlapDaemonProtocol\022?\n\nsubmitWork\022\027.Su" +
    - "bmitWorkRequestProto\032\030.SubmitWorkRespons" +
    - "eProto\022W\n\022sourceStateUpdated\022\037.SourceSta" +
    - "teUpdatedRequestProto\032 .SourceStateUpdat" +
    - "edResponseProto\022H\n\rqueryComplete\022\032.Query" +
    - "CompleteRequestProto\032\033.QueryCompleteResp",
    - "onseProto\022T\n\021terminateFragment\022\036.Termina" +
    - "teFragmentRequestProto\032\037.TerminateFragme" +
    - "ntResponseProto2]\n\026LlapManagementProtoco" +
    - "l\022C\n\022getDelegationToken\022\025.GetTokenReques" +
    - "tProto\032\026.GetTokenResponseProtoBH\n&org.ap" +
    - "ache.hadoop.hive.llap.daemon.rpcB\030LlapDa" +
    - "emonProtocolProtos\210\001\001\240\001\001"
    + "sponseProto\"&\n\024GetTokenRequestProto\022\016\n\006a",
    + "pp_id\030\001 \001(\t\"&\n\025GetTokenResponseProto\022\r\n\005" +
    + "token\030\001 \001(\014*2\n\020SourceStateProto\022\017\n\013S_SUC" +
    + "CEEDED\020\001\022\r\n\tS_RUNNING\020\002*E\n\024SubmissionSta" +
    + "teProto\022\014\n\010ACCEPTED\020\001\022\014\n\010REJECTED\020\002\022\021\n\rE" +
    + "VICTED_OTHER\020\0032\316\002\n\022LlapDaemonProtocol\022?\n" +
    + "\nsubmitWork\022\027.SubmitWorkRequestProto\032\030.S" +
    + "ubmitWorkResponseProto\022W\n\022sourceStateUpd" +
    + "ated\022\037.SourceStateUpdatedRequestProto\032 ." +
    + "SourceStateUpdatedResponseProto\022H\n\rquery" +
    + "Complete\022\032.QueryCompleteRequestProto\032\033.Q",
    + "ueryCompleteResponseProto\022T\n\021terminateFr" +
    + "agment\022\036.TerminateFragmentRequestProto\032\037" +
    + ".TerminateFragmentResponseProto2]\n\026LlapM" +
    + "anagementProtocol\022C\n\022getDelegationToken\022" +
    + "\025.GetTokenRequestProto\032\026.GetTokenRespons" +
    + "eProtoBH\n&org.apache.hadoop.hive.llap.da" +
    + "emon.rpcB\030LlapDaemonProtocolProtos\210\001\001\240\001\001"
          };
          com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
            new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
    @@ -14533,7 +14704,7 @@ public final class LlapDaemonProtocolProtos {
                internal_static_GetTokenRequestProto_fieldAccessorTable = new
                  com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                    internal_static_GetTokenRequestProto_descriptor,
    - new java.lang.String[] { });
    + new java.lang.String[] { "AppId", });
                internal_static_GetTokenResponseProto_descriptor =
                  getDescriptor().getMessageTypes().get(16);
                internal_static_GetTokenResponseProto_fieldAccessorTable = new

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-common/src/java/org/apache/hadoop/hive/llap/DaemonId.java
    ----------------------------------------------------------------------
    diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/DaemonId.java b/llap-common/src/java/org/apache/hadoop/hive/llap/DaemonId.java
    new file mode 100644
    index 0000000..18355e6
    --- /dev/null
    +++ b/llap-common/src/java/org/apache/hadoop/hive/llap/DaemonId.java
    @@ -0,0 +1,41 @@
    +/*
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.hadoop.hive.llap;
    +
    +public class DaemonId {
    + private final String userName;
    + private final String clusterName;
    + private final String appId;
    + private final String hostName;
    + private final long startTime;
    +
    + public DaemonId(String userName, String clusterName, String hostName, String appId,
    + long startTime) {
    + this.userName = userName;
    + this.clusterName = clusterName;
    + this.appId = appId;
    + this.hostName = hostName;
    + this.startTime = startTime;
     + // TODO: we could also get a unique number per daemon.
    + }
    +
    + public String getClusterString() {
    + return userName + "_" + clusterName + "_" + appId;
    + }
    +
    + public String getApplicationId() {
    + return appId;
    + }
    +}
    \ No newline at end of file
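
[Editor's note] A small usage sketch (not from the patch) of the new DaemonId class added above. The constructor arguments are illustrative placeholders; the argument order follows the constructor shown in the diff (user, cluster, host, appId, start time).

  import org.apache.hadoop.hive.llap.DaemonId;

  public class DaemonIdSketch {
    public static void main(String[] args) {
      DaemonId id = new DaemonId("hive", "llap0", "node-1.example.com",
          "application_1462300000000_0001", System.currentTimeMillis());
      // getClusterString() concatenates user, cluster name and app id with '_'.
      System.out.println(id.getClusterString());   // e.g. hive_llap0_application_1462300000000_0001
      System.out.println(id.getApplicationId());
    }
  }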

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenIdentifier.java
    ----------------------------------------------------------------------
    diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenIdentifier.java b/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenIdentifier.java
    index 23980d0..e28eddd 100644
    --- a/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenIdentifier.java
    +++ b/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenIdentifier.java
    @@ -22,6 +22,7 @@ import java.io.DataInput;
      import java.io.DataOutput;
      import java.io.IOException;

    +import org.apache.commons.lang.StringUtils;
      import org.apache.hadoop.classification.InterfaceAudience;
      import org.apache.hadoop.io.Text;
      import org.apache.hadoop.security.token.Token;
    @@ -31,25 +32,32 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdenti
      public class LlapTokenIdentifier extends AbstractDelegationTokenIdentifier {
        private static final String KIND = "LLAP_TOKEN";
        public static final Text KIND_NAME = new Text(KIND);
    + private String clusterId;
    + private String appId;

        public LlapTokenIdentifier() {
          super();
        }

    - public LlapTokenIdentifier(Text owner, Text renewer, Text realUser) {
    + public LlapTokenIdentifier(Text owner, Text renewer, Text realUser,
    + String clusterId, String appId) {
          super(owner, renewer, realUser);
    + this.clusterId = clusterId;
    + this.appId = appId == null ? "" : appId;
        }

        @Override
        public void write(DataOutput out) throws IOException {
          super.write(out);
    - // Nothing right now.
    + out.writeUTF(clusterId);
    + out.writeUTF(appId);
        }

        @Override
        public void readFields(DataInput in) throws IOException {
          super.readFields(in);
    - // Nothing right now.
    + clusterId = in.readUTF();
    + appId = in.readUTF();
        }

        @Override
    @@ -57,21 +65,34 @@ public class LlapTokenIdentifier extends AbstractDelegationTokenIdentifier {
          return KIND_NAME;
        }

    + public String getAppId() {
    + return appId;
    + }
    +
    + public String getClusterId() {
    + return clusterId;
    + }
    +
        @Override
        public int hashCode() {
    - // Nothing else right now.
    - return super.hashCode();
    + final int prime = 31;
    + int result = prime * super.hashCode() + ((appId == null) ? 0 : appId.hashCode());
    + return prime * result + ((clusterId == null) ? 0 : clusterId.hashCode());
        }

        @Override
    - public boolean equals(Object other) {
    - // Nothing else right now.
    - return super.equals(other);
    + public boolean equals(Object obj) {
    + if (this == obj) return true;
    + if (!(obj instanceof LlapTokenIdentifier) || !super.equals(obj)) return false;
    + LlapTokenIdentifier other = (LlapTokenIdentifier) obj;
    + return (appId == null ? other.appId == null : appId.equals(other.appId))
    + && (clusterId == null ? other.clusterId == null : clusterId.equals(other.clusterId));
        }

        @Override
        public String toString() {
    - return KIND + "; " + super.toString();
    + return KIND + "; " + super.toString() + ", cluster " + clusterId + ", app secret hash "
    + + (StringUtils.isBlank(appId) ? 0 : appId.hashCode());
        }

        @InterfaceAudience.Private
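
[Editor's note] A minimal round-trip sketch (not in the commit) showing that the cluster id and app id added to LlapTokenIdentifier above now survive serialization; DataOutputBuffer/DataInputBuffer are the standard org.apache.hadoop.io helpers, and all values are placeholders.

  import org.apache.hadoop.hive.llap.security.LlapTokenIdentifier;
  import org.apache.hadoop.io.DataInputBuffer;
  import org.apache.hadoop.io.DataOutputBuffer;
  import org.apache.hadoop.io.Text;

  public class LlapTokenIdentifierSketch {
    public static void main(String[] args) throws Exception {
      LlapTokenIdentifier original = new LlapTokenIdentifier(
          new Text("hive"), new Text("hive"), new Text("hive"),
          "llap0-cluster", "application_1462300000000_0001");

      // write() now appends clusterId and appId after the superclass fields.
      DataOutputBuffer out = new DataOutputBuffer();
      original.write(out);

      DataInputBuffer in = new DataInputBuffer();
      in.reset(out.getData(), out.getLength());
      LlapTokenIdentifier copy = new LlapTokenIdentifier();
      copy.readFields(in);

      // equals()/hashCode() now take both new fields into account.
      System.out.println(original.equals(copy) && copy.getAppId().equals(original.getAppId()));
    }
  }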

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenProvider.java
    ----------------------------------------------------------------------
    diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenProvider.java b/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenProvider.java
    index 2e99a28..edf9b18 100644
    --- a/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenProvider.java
    +++ b/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapTokenProvider.java
    @@ -23,5 +23,5 @@ import java.io.IOException;
      import org.apache.hadoop.security.token.Token;

      public interface LlapTokenProvider {
    - Token<LlapTokenIdentifier> getDelegationToken() throws IOException;
    + Token<LlapTokenIdentifier> getDelegationToken(String appId) throws IOException;
      }
    \ No newline at end of file
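
[Editor's note] The interface change above adds an appId parameter to getDelegationToken. A tiny caller sketch (hypothetical, not from the patch); the provider instance would be whatever implementation the daemon wires up.

  import java.io.IOException;
  import org.apache.hadoop.hive.llap.security.LlapTokenIdentifier;
  import org.apache.hadoop.hive.llap.security.LlapTokenProvider;
  import org.apache.hadoop.security.token.Token;

  public class TokenProviderCallerSketch {
    // Callers now pass the application id they want encoded into the token.
    static Token<LlapTokenIdentifier> fetch(LlapTokenProvider provider, String appId)
        throws IOException {
      return provider.getDelegationToken(appId);
    }
  }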

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-common/src/protobuf/LlapDaemonProtocol.proto
    ----------------------------------------------------------------------
    diff --git a/llap-common/src/protobuf/LlapDaemonProtocol.proto b/llap-common/src/protobuf/LlapDaemonProtocol.proto
    index 944c96c..5cdc02e 100644
    --- a/llap-common/src/protobuf/LlapDaemonProtocol.proto
    +++ b/llap-common/src/protobuf/LlapDaemonProtocol.proto
    @@ -130,6 +130,7 @@ message TerminateFragmentResponseProto {
      }

      message GetTokenRequestProto {
    + optional string app_id = 1;
      }

      message GetTokenResponseProto {
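
[Editor's note] A short sketch (not part of the commit) of building the extended GetTokenRequestProto from Java; setAppId/hasAppId/getAppId are the accessors generated from the new optional app_id field shown earlier in this message, and the application id value is a placeholder.

  import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto;

  public class GetTokenRequestSketch {
    public static void main(String[] args) {
      GetTokenRequestProto req = GetTokenRequestProto.newBuilder()
          .setAppId("application_1462300000000_0001") // placeholder application id
          .build();
      System.out.println(req.hasAppId() + " " + req.getAppId());
    }
  }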

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
    index fc29371..c346aed 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/ContainerRunner.java
    @@ -29,9 +29,12 @@ public interface ContainerRunner {

        SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws IOException;

    - SourceStateUpdatedResponseProto sourceStateUpdated(SourceStateUpdatedRequestProto request);
    + SourceStateUpdatedResponseProto sourceStateUpdated(
    + SourceStateUpdatedRequestProto request) throws IOException;

    - QueryCompleteResponseProto queryComplete(QueryCompleteRequestProto request);
    + QueryCompleteResponseProto queryComplete(
    + QueryCompleteRequestProto request) throws IOException;

    - TerminateFragmentResponseProto terminateFragment(TerminateFragmentRequestProto request);
    + TerminateFragmentResponseProto terminateFragment(
    + TerminateFragmentRequestProto request) throws IOException;
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    index 3d45c7a..78b37f7 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    @@ -92,7 +92,7 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
            boolean enablePreemption, String[] localDirsBase, AtomicReference<Integer> localShufflePort,
            AtomicReference<InetSocketAddress> localAddress,
            long totalMemoryAvailableBytes, LlapDaemonExecutorMetrics metrics,
    - AMReporter amReporter, ClassLoader classLoader) {
    + AMReporter amReporter, ClassLoader classLoader, String clusterId) {
          super("ContainerRunnerImpl");
          this.conf = conf;
          Preconditions.checkState(numExecutors > 0,
    @@ -101,7 +101,7 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
          this.localShufflePort = localShufflePort;
          this.amReporter = amReporter;

    - this.queryTracker = new QueryTracker(conf, localDirsBase);
    + this.queryTracker = new QueryTracker(conf, localDirsBase, clusterId);
          addIfService(queryTracker);
          String waitQueueSchedulerClassName = HiveConf.getVar(
              conf, ConfVars.LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME);
    @@ -175,7 +175,8 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
                fragmentSpec.getFragmentIdentifierString());
            int dagIdentifier = taskAttemptId.getTaskID().getVertexID().getDAGId().getId();

    - QueryIdentifier queryIdentifier = new QueryIdentifier(request.getApplicationIdString(), dagIdentifier);
    + QueryIdentifier queryIdentifier = new QueryIdentifier(
    + request.getApplicationIdString(), dagIdentifier);

            Credentials credentials = new Credentials();
            DataInputBuffer dib = new DataInputBuffer();
    @@ -193,6 +194,7 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
                    fragmentSpec.getAttemptNumber(), request.getUser(), request.getFragmentSpec(),
                    jobToken);

    +
            String[] localDirs = fragmentInfo.getLocalDirs();
            Preconditions.checkNotNull(localDirs);
            if (LOG.isDebugEnabled()) {
    @@ -200,7 +202,6 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
            }
            // May need to setup localDir for re-localization, which is usually setup as Environment.PWD.
            // Used for re-localization, to add the user specified configuration (conf_pb_binary_stream)
    -
            TaskRunnerCallable callable = new TaskRunnerCallable(request, fragmentInfo, new Configuration(getConfig()),
                new LlapExecutionContext(localAddress.get().getHostName(), queryTracker), env,
                credentials, memoryPerExecutor, amReporter, confParams, metrics, killedTaskHandler,
    @@ -248,24 +249,23 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu

        @Override
        public SourceStateUpdatedResponseProto sourceStateUpdated(
    - SourceStateUpdatedRequestProto request) {
    + SourceStateUpdatedRequestProto request) throws IOException {
          LOG.info("Processing state update: " + stringifySourceStateUpdateRequest(request));
    - queryTracker.registerSourceStateChange(
    - new QueryIdentifier(request.getQueryIdentifier().getAppIdentifier(),
    - request.getQueryIdentifier().getDagIdentifier()), request.getSrcName(),
    - request.getState());
    + QueryIdentifier queryId = new QueryIdentifier(request.getQueryIdentifier().getAppIdentifier(),
    + request.getQueryIdentifier().getDagIdentifier());
    + queryTracker.registerSourceStateChange(queryId, request.getSrcName(), request.getState());
          return SourceStateUpdatedResponseProto.getDefaultInstance();
        }

        @Override
    - public QueryCompleteResponseProto queryComplete(QueryCompleteRequestProto request) {
    + public QueryCompleteResponseProto queryComplete(
    + QueryCompleteRequestProto request) throws IOException {
          QueryIdentifier queryIdentifier =
              new QueryIdentifier(request.getQueryIdentifier().getAppIdentifier(),
                  request.getQueryIdentifier().getDagIdentifier());
          LOG.info("Processing queryComplete notification for {}", queryIdentifier);
    - List<QueryFragmentInfo> knownFragments =
    - queryTracker
    - .queryComplete(queryIdentifier, request.getDeleteDelay());
    + List<QueryFragmentInfo> knownFragments = queryTracker.queryComplete(
    + queryIdentifier, request.getDeleteDelay(), false);
          LOG.info("DBG: Pending fragment count for completed query {} = {}", queryIdentifier,
              knownFragments.size());
          for (QueryFragmentInfo fragmentInfo : knownFragments) {
    @@ -277,9 +277,16 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
        }

        @Override
    - public TerminateFragmentResponseProto terminateFragment(TerminateFragmentRequestProto request) {
    - LOG.info("DBG: Received terminateFragment request for {}", request.getFragmentIdentifierString());
    - executorService.killFragment(request.getFragmentIdentifierString());
    + public TerminateFragmentResponseProto terminateFragment(
    + TerminateFragmentRequestProto request) throws IOException {
    + String fragmentId = request.getFragmentIdentifierString();
    + LOG.info("DBG: Received terminateFragment request for {}", fragmentId);
    + // TODO: ideally, QueryTracker should have fragment-to-query mapping.
    + QueryIdentifier queryId = executorService.findQueryByFragment(fragmentId);
    + // checkPermissions returns false if query is not found, throws on failure.
    + if (queryId != null && queryTracker.checkPermissionsForQuery(queryId)) {
    + executorService.killFragment(fragmentId);
    + }
          return TerminateFragmentResponseProto.getDefaultInstance();
        }

    @@ -355,8 +362,12 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
        @Override
        public void queryFailed(QueryIdentifier queryIdentifier) {
          LOG.info("Processing query failed notification for {}", queryIdentifier);
    - List<QueryFragmentInfo> knownFragments =
    - queryTracker.queryComplete(queryIdentifier, -1);
    + List<QueryFragmentInfo> knownFragments;
    + try {
    + knownFragments = queryTracker.queryComplete(queryIdentifier, -1, true);
    + } catch (IOException e) {
    + throw new RuntimeException(e); // Should never happen here, no permission check.
    + }
          LOG.info("DBG: Pending fragment count for failed query {} = {}", queryIdentifier,
              knownFragments.size());
          for (QueryFragmentInfo fragmentInfo : knownFragments) {
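
Net effect of the changes above: externally driven calls (sourceStateUpdated, queryComplete, terminateFragment) now pass through an ownership check, while the daemon's internal failure path skips it because there is no caller to check. A condensed, hypothetical restatement of the new terminateFragment flow (method name invented; types are the ones from this patch, assumed to be in the same package):

  static boolean tryKillFragment(Scheduler<TaskRunnerCallable> executorService,
      QueryTracker queryTracker, String fragmentId) throws IOException {
    QueryIdentifier queryId = executorService.findQueryByFragment(fragmentId);
    if (queryId == null) {
      return false; // unknown or already-finished fragment: nothing to kill
    }
    // Returns false if the query is no longer tracked; throws SecurityException if the
    // caller's token/Kerberos identity does not match the owner recorded at registration.
    if (!queryTracker.checkPermissionsForQuery(queryId)) {
      return false;
    }
    executorService.killFragment(fragmentId);
    return true;
  }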

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
    index 63cb16b..d23a44a 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
    @@ -26,12 +26,14 @@ import java.util.List;
      import java.util.Set;
      import java.util.concurrent.atomic.AtomicLong;
      import java.util.concurrent.atomic.AtomicReference;
    +import java.util.regex.Pattern;

      import javax.management.ObjectName;

      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.hive.conf.HiveConf;
      import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
    +import org.apache.hadoop.hive.llap.DaemonId;
      import org.apache.hadoop.hive.llap.LlapUtil;
      import org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration;
      import org.apache.hadoop.hive.llap.daemon.ContainerRunner;
    @@ -57,11 +59,13 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
      import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
      import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge.UdfWhitelistChecker;
      import org.apache.hadoop.metrics2.util.MBeans;
    +import org.apache.hadoop.security.UserGroupInformation;
      import org.apache.hadoop.service.CompositeService;
      import org.apache.hadoop.util.ExitUtil;
      import org.apache.hadoop.util.JvmPauseMonitor;
      import org.apache.hadoop.util.StringUtils;
      import org.apache.hadoop.yarn.api.ApplicationConstants;
    +import org.apache.hadoop.yarn.util.ConverterUtils;
      import org.apache.hive.common.util.ShutdownHookManager;
      import org.apache.logging.log4j.core.config.Configurator;
      import org.slf4j.Logger;
    @@ -97,6 +101,13 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
        private final int numExecutors;
        private final long maxJvmMemory;
        private final String[] localDirs;
    + private final DaemonId daemonId;
    +
    + private final static Pattern hostsRe = Pattern.compile("[^A-Za-z0-9_-]");
    + private static String generateClusterName(Configuration conf) {
    + String hosts = HiveConf.getTrimmedVar(conf, ConfVars.LLAP_DAEMON_SERVICE_HOSTS);
    + return hostsRe.matcher(hosts.startsWith("@") ? hosts.substring(1) : hosts).replaceAll("_");
    + }

        // TODO Not the best way to share the address
        private final AtomicReference<InetSocketAddress> srvAddress = new AtomicReference<>(),
    @@ -105,11 +116,10 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla

        public LlapDaemon(Configuration daemonConf, int numExecutors, long executorMemoryBytes,
            boolean ioEnabled, boolean isDirectCache, long ioMemoryBytes, String[] localDirs, int srvPort,
    - int mngPort, int shufflePort, int webPort) {
    + int mngPort, int shufflePort, int webPort, String appName) {
          super("LlapDaemon");

          initializeLogging();
    -
          printAsciiArt();

          Preconditions.checkArgument(numExecutors > 0);
    @@ -129,6 +139,14 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
                "LLAP service hosts startswith '@' but hive.zookeeper.quorum is not set." +
                    " hive.zookeeper.quorum must be set.");
          }
    + String hostName = MetricsUtils.getHostName();
    + try {
    + daemonId = new DaemonId(UserGroupInformation.getCurrentUser().getUserName(),
    + generateClusterName(daemonConf), hostName, appName, System.currentTimeMillis());
    + } catch (IOException ex) {
    + throw new RuntimeException(ex);
    + }
    +

          this.maxJvmMemory = getTotalHeapSize();
          this.llapIoEnabled = ioEnabled;
    @@ -193,7 +211,7 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
          LlapMetricsSystem.initialize("LlapDaemon");
          this.pauseMonitor = new JvmPauseMonitor(daemonConf);
          pauseMonitor.start();
    - String displayName = "LlapDaemonExecutorMetrics-" + MetricsUtils.getHostName();
    + String displayName = "LlapDaemonExecutorMetrics-" + hostName;
          String sessionId = MetricsUtils.getUUID();
          daemonConf.set("llap.daemon.metrics.sessionid", sessionId);
          String[] strIntervals = HiveConf.getTrimmedStringsVar(daemonConf,
    @@ -223,11 +241,11 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
          this.amReporter = new AMReporter(srvAddress, new QueryFailedHandlerProxy(), daemonConf);

          this.server = new LlapProtocolServerImpl(
    - numHandlers, this, srvAddress, mngAddress, srvPort, mngPort);
    + numHandlers, this, srvAddress, mngAddress, srvPort, mngPort, daemonId);

          this.containerRunner = new ContainerRunnerImpl(daemonConf, numExecutors, waitQueueSize,
              enablePreemption, localDirs, this.shufflePort, srvAddress, executorMemoryBytes, metrics,
    - amReporter, executorClassLoader);
    + amReporter, executorClassLoader, daemonId.getClusterString());
          addIfService(containerRunner);

          // Not adding the registry as a service, since we need to control when it is initialized - conf used to pickup properties.
    @@ -377,10 +395,18 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
            LlapDaemonConfiguration daemonConf = new LlapDaemonConfiguration();

            String containerIdStr = System.getenv(ApplicationConstants.Environment.CONTAINER_ID.name());
    + String appName = null;
            if (containerIdStr != null && !containerIdStr.isEmpty()) {
              daemonConf.set(ConfVars.LLAP_DAEMON_CONTAINER_ID.varname, containerIdStr);
    + appName = ConverterUtils.toContainerId(containerIdStr)
    + .getApplicationAttemptId().getApplicationId().toString();
            } else {
              daemonConf.unset(ConfVars.LLAP_DAEMON_CONTAINER_ID.varname);
    + // Note, we assume production LLAP always runs under YARN.
    + LOG.error("Cannot find " + ApplicationConstants.Environment.CONTAINER_ID.toString()
    + + "; LLAP tokens may grant access to subsequent instances of the cluster with"
    + + " the same name");
    + appName = null;
            }

            int numExecutors = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_NUM_EXECUTORS);
    @@ -400,7 +426,8 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
            boolean isDirectCache = HiveConf.getBoolVar(daemonConf, ConfVars.LLAP_ALLOCATOR_DIRECT);
            boolean isLlapIo = HiveConf.getBoolVar(daemonConf, HiveConf.ConfVars.LLAP_IO_ENABLED, true);
            llapDaemon = new LlapDaemon(daemonConf, numExecutors, executorMemoryBytes, isLlapIo,
    - isDirectCache, ioMemoryBytes, localDirs, rpcPort, mngPort, shufflePort, webPort);
    + isDirectCache, ioMemoryBytes, localDirs, rpcPort, mngPort, shufflePort, webPort,
    + appName);

            LOG.info("Adding shutdown hook for LlapDaemon");
            ShutdownHookManager.addShutdownHook(new CompositeServiceShutdownHook(llapDaemon), 1);
    @@ -420,24 +447,27 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
        }

        @Override
    - public SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws
    - IOException {
    + public SubmitWorkResponseProto submitWork(
    + SubmitWorkRequestProto request) throws IOException {
          numSubmissions.incrementAndGet();
          return containerRunner.submitWork(request);
        }

        @Override
    - public SourceStateUpdatedResponseProto sourceStateUpdated(SourceStateUpdatedRequestProto request) {
    + public SourceStateUpdatedResponseProto sourceStateUpdated(
    + SourceStateUpdatedRequestProto request) throws IOException {
          return containerRunner.sourceStateUpdated(request);
        }

        @Override
    - public QueryCompleteResponseProto queryComplete(QueryCompleteRequestProto request) {
    + public QueryCompleteResponseProto queryComplete(
    + QueryCompleteRequestProto request) throws IOException {
          return containerRunner.queryComplete(request);
        }

        @Override
    - public TerminateFragmentResponseProto terminateFragment(TerminateFragmentRequestProto request) {
    + public TerminateFragmentResponseProto terminateFragment(
    + TerminateFragmentRequestProto request) throws IOException {
          return containerRunner.terminateFragment(request);
        }
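
generateClusterName() turns the LLAP_DAEMON_SERVICE_HOSTS value into a string safe to use as a cluster identifier: a leading '@' (ZooKeeper-registry form) is stripped and anything outside [A-Za-z0-9_-] becomes '_'. A standalone illustration with made-up host values:

  import java.util.regex.Pattern;

  public class ClusterNameExample {
    private static final Pattern HOSTS_RE = Pattern.compile("[^A-Za-z0-9_-]");

    static String clusterName(String hosts) {
      String raw = hosts.startsWith("@") ? hosts.substring(1) : hosts;
      return HOSTS_RE.matcher(raw).replaceAll("_");
    }

    public static void main(String[] args) {
      System.out.println(clusterName("@llap0"));      // -> llap0 (registry namespace)
      System.out.println(clusterName("node1.example.com,node2.example.com"));
      // -> node1_example_com_node2_example_com (fixed host list)
    }
  }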


    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
    index dae1a3a..db8bfa6 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
    @@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
      import org.apache.hadoop.hive.conf.HiveConf;
      import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
    +import org.apache.hadoop.hive.llap.DaemonId;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto;
    @@ -71,13 +72,11 @@ public class LlapProtocolServerImpl extends AbstractService
        private final AtomicReference<InetSocketAddress> srvAddress, mngAddress;
        private SecretManager zkSecretManager;
        private String restrictedToUser = null;
    + private final DaemonId daemonId;

    - public LlapProtocolServerImpl(int numHandlers,
    - ContainerRunner containerRunner,
    - AtomicReference<InetSocketAddress> srvAddress,
    - AtomicReference<InetSocketAddress> mngAddress,
    - int srvPort,
    - int mngPort) {
    + public LlapProtocolServerImpl(int numHandlers, ContainerRunner containerRunner,
    + AtomicReference<InetSocketAddress> srvAddress, AtomicReference<InetSocketAddress> mngAddress,
    + int srvPort, int mngPort, DaemonId daemonId) {
          super("LlapDaemonProtocolServerImpl");
          this.numHandlers = numHandlers;
          this.containerRunner = containerRunner;
    @@ -85,14 +84,14 @@ public class LlapProtocolServerImpl extends AbstractService
          this.srvPort = srvPort;
          this.mngAddress = mngAddress;
          this.mngPort = mngPort;
    + this.daemonId = daemonId;
          LOG.info("Creating: " + LlapProtocolServerImpl.class.getSimpleName() +
              " with port configured to: " + srvPort);
        }

        @Override
        public SubmitWorkResponseProto submitWork(RpcController controller,
    - SubmitWorkRequestProto request) throws
    - ServiceException {
    + SubmitWorkRequestProto request) throws ServiceException {
          try {
            return containerRunner.submitWork(request);
          } catch (IOException e) {
    @@ -103,20 +102,31 @@ public class LlapProtocolServerImpl extends AbstractService
        @Override
        public SourceStateUpdatedResponseProto sourceStateUpdated(RpcController controller,
            SourceStateUpdatedRequestProto request) throws ServiceException {
    - return containerRunner.sourceStateUpdated(request);
    + try {
    + return containerRunner.sourceStateUpdated(request);
    + } catch (IOException e) {
    + throw new ServiceException(e);
    + }
        }

        @Override
        public QueryCompleteResponseProto queryComplete(RpcController controller,
            QueryCompleteRequestProto request) throws ServiceException {
    - return containerRunner.queryComplete(request);
    + try {
    + return containerRunner.queryComplete(request);
    + } catch (IOException e) {
    + throw new ServiceException(e);
    + }
        }

        @Override
        public TerminateFragmentResponseProto terminateFragment(
    - RpcController controller,
    - TerminateFragmentRequestProto request) throws ServiceException {
    - return containerRunner.terminateFragment(request);
    + RpcController controller, TerminateFragmentRequestProto request) throws ServiceException {
    + try {
    + return containerRunner.terminateFragment(request);
    + } catch (IOException e) {
    + throw new ServiceException(e);
    + }
        }

        @Override
    @@ -140,7 +150,7 @@ public class LlapProtocolServerImpl extends AbstractService
          }
          String llapPrincipal = HiveConf.getVar(conf, ConfVars.LLAP_KERBEROS_PRINCIPAL),
              llapKeytab = HiveConf.getVar(conf, ConfVars.LLAP_KERBEROS_KEYTAB_FILE);
    - zkSecretManager = SecretManager.createSecretManager(conf, llapPrincipal, llapKeytab);
    + zkSecretManager = SecretManager.createSecretManager(conf, llapPrincipal, llapKeytab, daemonId);

          // Start the protocol server after properly authenticating with daemon keytab.
          UserGroupInformation daemonUgi = null;
    @@ -275,7 +285,8 @@ public class LlapProtocolServerImpl extends AbstractService
            realUser = new Text(ugi.getRealUser().getUserName());
          }
          Text renewer = new Text(ugi.getShortUserName());
    - LlapTokenIdentifier llapId = new LlapTokenIdentifier(owner, renewer, realUser);
    + LlapTokenIdentifier llapId = new LlapTokenIdentifier(owner, renewer, realUser,
    + daemonId.getClusterString(), request.hasAppId() ? request.getAppId() : null);
          // TODO: note that the token is not renewable right now and will last for 2 weeks by default.
          Token<LlapTokenIdentifier> token = new Token<LlapTokenIdentifier>(llapId, zkSecretManager);
          if (LOG.isInfoEnabled()) {
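
The three delegating RPC methods above all apply the same translation: the ContainerRunner call may now throw a checked IOException, and the protobuf service boundary only allows ServiceException. A generic sketch of that pattern (the functional wrapper is hypothetical, not part of the patch):

  import java.io.IOException;

  import com.google.protobuf.ServiceException;

  public class RpcExceptionTranslationExample {
    interface IoCall<T> { T call() throws IOException; }

    // Wraps checked IOExceptions (e.g. from reading the caller's UGI/credentials) into
    // the ServiceException the protobuf service interface expects.
    static <T> T translate(IoCall<T> body) throws ServiceException {
      try {
        return body.call();
      } catch (IOException e) {
        throw new ServiceException(e);
      }
    }
  }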

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTokenChecker.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTokenChecker.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTokenChecker.java
    new file mode 100644
    index 0000000..03ee055
    --- /dev/null
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTokenChecker.java
    @@ -0,0 +1,137 @@
    +/*
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.hadoop.hive.llap.daemon.impl;
    +
    +import com.google.common.annotations.VisibleForTesting;
    +
    +import java.util.ArrayList;
    +
    +import java.util.List;
    +
    +import java.io.IOException;
    +
    +import org.apache.commons.lang3.StringUtils;
    +import org.apache.commons.lang3.tuple.ImmutablePair;
    +import org.apache.commons.lang3.tuple.Pair;
    +import org.apache.hadoop.hive.llap.security.LlapTokenIdentifier;
    +import org.apache.hadoop.security.UserGroupInformation;
    +import org.apache.hadoop.security.token.TokenIdentifier;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
    +
    +public final class LlapTokenChecker {
    + private static final Logger LOG = LoggerFactory.getLogger(LlapTokenChecker.class);
    +
    + private static final ImmutablePair<String, String> NO_SECURITY = new ImmutablePair<>(null, null);
    + public static Pair<String, String> getTokenInfo(String clusterId) throws IOException {
    + if (!UserGroupInformation.isSecurityEnabled()) return NO_SECURITY;
    + UserGroupInformation current = UserGroupInformation.getCurrentUser();
    + String kerberosName = current.hasKerberosCredentials() ? current.getShortUserName() : null;
    + List<LlapTokenIdentifier> tokens = getLlapTokens(current, clusterId);
    + if ((tokens == null || tokens.isEmpty()) && kerberosName == null) {
    + throw new SecurityException("No tokens or kerberos for " + current);
    + }
    + return getTokenInfoInternal(kerberosName, tokens);
    + }
    +
    + private static List<LlapTokenIdentifier> getLlapTokens(
    + UserGroupInformation ugi, String clusterId) {
    + List<LlapTokenIdentifier> tokens = null;
    + for (TokenIdentifier id : ugi.getTokenIdentifiers()) {
    + if (!LlapTokenIdentifier.KIND_NAME.equals(id.getKind())) continue;
    + if (LOG.isDebugEnabled()) {
    + LOG.debug("Token {}", id);
    + }
    + LlapTokenIdentifier llapId = (LlapTokenIdentifier)id;
    + if (!clusterId.equals(llapId.getClusterId())) continue;
    + if (tokens == null) {
    + tokens = new ArrayList<>();
    + }
    + tokens.add((LlapTokenIdentifier)id);
    + }
    + return tokens;
    + }
    +
    + @VisibleForTesting
    + static Pair<String, String> getTokenInfoInternal(
    + String kerberosName, List<LlapTokenIdentifier> tokens) {
    + assert (tokens != null && !tokens.isEmpty()) || kerberosName != null;
    + if (tokens == null) {
    + return new ImmutablePair<String, String>(kerberosName, null);
    + }
    + String userName = kerberosName, appId = null;
    + for (LlapTokenIdentifier llapId : tokens) {
    + String newUserName = llapId.getRealUser().toString();
    + if (userName != null && !userName.equals(newUserName)) {
    + throw new SecurityException("Ambiguous user name from credentials - " + userName
    + + " and " + newUserName + " from " + llapId
     + + ((kerberosName != null) ? ("; has kerberos credentials for " + kerberosName) : ""));
    + }
    + userName = newUserName;
    + String newAppId = llapId.getAppId();
    + if (!StringUtils.isEmpty(newAppId)) {
    + if (!StringUtils.isEmpty(appId) && !appId.equals(newAppId)) {
    + throw new SecurityException("Ambiguous app ID from credentials - " + appId
    + + " and " + newAppId + " from " + llapId);
    + }
    + appId = newAppId;
    + }
    + }
    + assert userName != null;
    + return new ImmutablePair<String, String>(userName, appId);
    + }
    +
    + public static void checkPermissions(
    + String clusterId, String userName, String appId, Object hint) throws IOException {
    + if (!UserGroupInformation.isSecurityEnabled()) return;
    + UserGroupInformation current = UserGroupInformation.getCurrentUser();
    + String kerberosName = current.hasKerberosCredentials() ? current.getShortUserName() : null;
    + List<LlapTokenIdentifier> tokens = getLlapTokens(current, clusterId);
    + checkPermissionsInternal(kerberosName, tokens, userName, appId, hint);
    + }
    +
    + @VisibleForTesting
    + static void checkPermissionsInternal(String kerberosName, List<LlapTokenIdentifier> tokens,
    + String userName, String appId, Object hint) {
    + if (kerberosName != null && StringUtils.isEmpty(appId) && kerberosName.equals(userName)) {
    + return;
    + }
    + if (tokens != null) {
    + for (LlapTokenIdentifier llapId : tokens) {
    + String tokenUser = llapId.getRealUser().toString(), tokenAppId = llapId.getAppId();
    + if (checkTokenPermissions(userName, appId, tokenUser, tokenAppId)) return;
    + }
    + }
    + throw new SecurityException("Unauthorized to access "
    + + userName + ", " + appId.hashCode() + " (" + hint + ")");
    + }
    +
    + public static void checkPermissions(
    + Pair<String, String> prm, String userName, String appId, Object hint) {
    + if (userName == null) {
    + assert StringUtils.isEmpty(appId);
    + return;
    + }
    + if (!checkTokenPermissions(userName, appId, prm.getLeft(), prm.getRight())) {
    + throw new SecurityException("Unauthorized to access "
    + + userName + ", " + appId.hashCode() + " (" + hint + ")");
    + }
    + }
    +
    + private static boolean checkTokenPermissions(
    + String userName, String appId, String tokenUser, String tokenAppId) {
    + return userName.equals(tokenUser)
    + && (StringUtils.isEmpty(appId) || appId.equals(tokenAppId));
    + }
    +}
    \ No newline at end of file
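
Taken together, the class implements a simple binding rule: when a query is first registered, the daemon derives a (user, appId) pair from the caller's Kerberos identity and/or the LLAP tokens it presented for this cluster, and every later operation on that query must come from credentials matching that pair. A usage sketch of the public entry points (the cluster string and the surrounding class are illustrative):

  import java.io.IOException;

  import org.apache.commons.lang3.tuple.Pair;
  import org.apache.hadoop.hive.llap.daemon.impl.LlapTokenChecker;

  public class TokenCheckerUsageExample {
    static final String CLUSTER_ID = "hive_llap0"; // made-up cluster string

    // At fragment registration: record who owns the query.
    static Pair<String, String> bindOwner() throws IOException {
      return LlapTokenChecker.getTokenInfo(CLUSTER_ID); // (userName, appId or null)
    }

    // On later RPCs for the same query: throws SecurityException on a mismatch.
    static void verifyOwner(String ownerUser, String ownerAppId, Object queryId)
        throws IOException {
      LlapTokenChecker.checkPermissions(CLUSTER_ID, ownerUser, ownerAppId, queryId);
    }
  }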

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
    index 64c2b58..8daef9e 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
    @@ -55,10 +55,11 @@ public class QueryInfo {
        private final ConcurrentMap<String, SourceStateProto> sourceStateMap;

        private final FinishableStateTracker finishableStateTracker = new FinishableStateTracker();
    + private final String tokenUserName, appId;

    - public QueryInfo(QueryIdentifier queryIdentifier, String appIdString, String dagName, int dagIdentifier,
    - String user, ConcurrentMap<String, SourceStateProto> sourceStateMap,
    - String[] localDirsBase, FileSystem localFs) {
    + public QueryInfo(QueryIdentifier queryIdentifier, String appIdString, String dagName,
    + int dagIdentifier, String user, ConcurrentMap<String, SourceStateProto> sourceStateMap,
    + String[] localDirsBase, FileSystem localFs, String tokenUserName, String tokenAppId) {
          this.queryIdentifier = queryIdentifier;
          this.appIdString = appIdString;
          this.dagName = dagName;
    @@ -67,6 +68,8 @@ public class QueryInfo {
          this.user = user;
          this.localDirsBase = localDirsBase;
          this.localFs = localFs;
    + this.tokenUserName = tokenUserName;
    + this.appId = tokenAppId;
        }

        public QueryIdentifier getQueryIdentifier() {
    @@ -270,4 +273,12 @@ public class QueryInfo {
            this.lastFinishableState = lastFinishableState;
          }
        }
    +
    + public String getTokenUserName() {
    + return tokenUserName;
    + }
    +
    + public String getTokenAppId() {
    + return appId;
    + }
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
    index 14657e6..cb3be2b 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
    @@ -18,6 +18,8 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
      import java.util.concurrent.Executors;
      import java.util.concurrent.ScheduledExecutorService;
      import java.util.concurrent.TimeUnit;
    +
    +import org.apache.commons.lang3.tuple.Pair;
      import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.security.token.Token;
      import org.apache.tez.common.CallableWithNdc;
    @@ -60,6 +62,7 @@ public class QueryTracker extends AbstractService {

        private final String[] localDirsBase;
        private final FileSystem localFs;
    + private final String clusterId;
        private final long defaultDeleteDelaySeconds;

        // TODO At the moment there's no way of knowing whether a query is running or not.
    @@ -89,9 +92,10 @@ public class QueryTracker extends AbstractService {
        private final ConcurrentHashMap<QueryIdentifier, String> queryIdentifierToHiveQueryId =
            new ConcurrentHashMap<>();

    - public QueryTracker(Configuration conf, String[] localDirsBase) {
    + public QueryTracker(Configuration conf, String[] localDirsBase, String clusterId) {
          super("QueryTracker");
          this.localDirsBase = localDirsBase;
    + this.clusterId = clusterId;
          try {
            localFs = FileSystem.getLocal(conf);
          } catch (IOException e) {
    @@ -119,35 +123,50 @@ public class QueryTracker extends AbstractService {
         * @param user
         * @throws IOException
         */
    - QueryFragmentInfo registerFragment(QueryIdentifier queryIdentifier, String appIdString, String dagName,
    - int dagIdentifier, String vertexName, int fragmentNumber, int attemptNumber, String user,
    - FragmentSpecProto fragmentSpec, Token<JobTokenIdentifier> appToken) throws IOException {
    + QueryFragmentInfo registerFragment(QueryIdentifier queryIdentifier, String appIdString,
    + String dagName, int dagIdentifier, String vertexName, int fragmentNumber, int attemptNumber,
    + String user, FragmentSpecProto fragmentSpec, Token<JobTokenIdentifier> appToken)
    + throws IOException {
          ReadWriteLock dagLock = getDagLock(queryIdentifier);
          dagLock.readLock().lock();
          try {
    - if (!completedDagMap.contains(queryIdentifier)) {
    - QueryInfo queryInfo = queryInfoMap.get(queryIdentifier);
    - if (queryInfo == null) {
    - queryInfo = new QueryInfo(queryIdentifier, appIdString, dagName, dagIdentifier, user,
    - getSourceCompletionMap(queryIdentifier), localDirsBase, localFs);
    - queryInfoMap.putIfAbsent(queryIdentifier, queryInfo);
    - }
    -
    - if (LOG.isDebugEnabled()) {
    - LOG.debug("Registering request for {} with the ShuffleHandler", queryIdentifier);
    - }
    - ShuffleHandler.get()
    - .registerDag(appIdString, dagIdentifier, appToken,
    - user, queryInfo.getLocalDirs());
    -
    - return queryInfo.registerFragment(vertexName, fragmentNumber, attemptNumber, fragmentSpec);
    - } else {
    + if (completedDagMap.contains(queryIdentifier)) {
              // Cleanup the dag lock here, since it may have been created after the query completed
              dagSpecificLocks.remove(queryIdentifier);
              throw new RuntimeException(
                  "Dag " + dagName + " already complete. Rejecting fragment ["
                      + vertexName + ", " + fragmentNumber + ", " + attemptNumber + "]");
            }
    + // TODO: for now, we get the secure username out of UGI... after signing, we can take it
    + // out of the request provided that it's signed.
    + Pair<String, String> tokenInfo = LlapTokenChecker.getTokenInfo(clusterId);
    + boolean isExistingQueryInfo = true;
    + QueryInfo queryInfo = queryInfoMap.get(queryIdentifier);
    + if (queryInfo == null) {
    + queryInfo = new QueryInfo(queryIdentifier, appIdString, dagName, dagIdentifier, user,
    + getSourceCompletionMap(queryIdentifier), localDirsBase, localFs,
    + tokenInfo.getLeft(), tokenInfo.getRight());
    + QueryInfo old = queryInfoMap.putIfAbsent(queryIdentifier, queryInfo);
    + if (old != null) {
    + queryInfo = old;
    + } else {
    + isExistingQueryInfo = false;
    + }
    + }
    + if (isExistingQueryInfo) {
    + // We already retrieved the incoming info, check without UGI.
    + LlapTokenChecker.checkPermissions(tokenInfo, queryInfo.getTokenUserName(),
    + queryInfo.getTokenAppId(), queryInfo.getQueryIdentifier());
    + }
    +
    + if (LOG.isDebugEnabled()) {
    + LOG.debug("Registering request for {} with the ShuffleHandler", queryIdentifier);
    + }
    + ShuffleHandler.get()
    + .registerDag(appIdString, dagIdentifier, appToken,
    + user, queryInfo.getLocalDirs());
    +
    + return queryInfo.registerFragment(vertexName, fragmentNumber, attemptNumber, fragmentSpec);
          } finally {
            dagLock.readLock().unlock();
          }
    @@ -174,17 +193,20 @@ public class QueryTracker extends AbstractService {
         * @param queryIdentifier
         * @param deleteDelay
         */
    - List<QueryFragmentInfo> queryComplete(QueryIdentifier queryIdentifier, long deleteDelay) {
    + List<QueryFragmentInfo> queryComplete(QueryIdentifier queryIdentifier, long deleteDelay,
    + boolean isInternal) throws IOException {
          if (deleteDelay == -1) {
            deleteDelay = defaultDeleteDelaySeconds;
          }
          ReadWriteLock dagLock = getDagLock(queryIdentifier);
          dagLock.writeLock().lock();
          try {
    + QueryInfo queryInfo = isInternal
    + ? queryInfoMap.get(queryIdentifier) : checkPermissionsAndGetQuery(queryIdentifier);
            rememberCompletedDag(queryIdentifier);
            LOG.info("Processing queryComplete for queryIdentifier={} with deleteDelay={} seconds", queryIdentifier,
                deleteDelay);
    - QueryInfo queryInfo = queryInfoMap.remove(queryIdentifier);
    + queryInfoMap.remove(queryIdentifier);
            if (queryInfo == null) {
              LOG.warn("Ignoring query complete for unknown dag: {}", queryIdentifier);
              return Collections.emptyList();
    @@ -229,9 +251,10 @@ public class QueryTracker extends AbstractService {
         * @param sourceName
         * @param sourceState
         */
    - void registerSourceStateChange(QueryIdentifier queryIdentifier, String sourceName, SourceStateProto sourceState) {
    + void registerSourceStateChange(QueryIdentifier queryIdentifier, String sourceName,
    + SourceStateProto sourceState) throws IOException {
          getSourceCompletionMap(queryIdentifier).put(sourceName, sourceState);
    - QueryInfo queryInfo = queryInfoMap.get(queryIdentifier);
    + QueryInfo queryInfo = checkPermissionsAndGetQuery(queryIdentifier);
          if (queryInfo != null) {
            queryInfo.sourceStateUpdated(sourceName);
          } else {
    @@ -322,4 +345,16 @@ public class QueryTracker extends AbstractService {
            return null;
          }
        }
    +
    + private QueryInfo checkPermissionsAndGetQuery(QueryIdentifier queryId) throws IOException {
    + QueryInfo queryInfo = queryInfoMap.get(queryId);
    + if (queryInfo == null) return null;
     + LlapTokenChecker.checkPermissions(clusterId, queryInfo.getTokenUserName(),
     + queryInfo.getTokenAppId(), queryInfo.getQueryIdentifier());
    + return queryInfo;
    + }
    +
    + public boolean checkPermissionsForQuery(QueryIdentifier queryId) throws IOException {
    + return checkPermissionsAndGetQuery(queryId) != null;
    + }
      }
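
The registration path above uses the usual putIfAbsent create-or-get idiom: whoever wins the race supplies the owner info taken from its own credentials, and a caller that finds an existing entry is instead checked against the owner already on record. The same idiom in isolation, with invented types:

  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.ConcurrentMap;

  public class CreateOrGetExample {
    static <K, V> V createOrGet(ConcurrentMap<K, V> map, K key, V fresh, Runnable recheckOwner) {
      V old = map.putIfAbsent(key, fresh);
      if (old == null) {
        return fresh;       // we registered it; owner info came from our own credentials
      }
      recheckOwner.run();   // already registered; verify we match the recorded owner
      return old;
    }

    public static void main(String[] args) {
      ConcurrentMap<String, String> m = new ConcurrentHashMap<>();
      System.out.println(createOrGet(m, "q1", "owner-A", () -> { }));          // owner-A
      System.out.println(createOrGet(m, "q1", "owner-B",
          () -> System.out.println("re-check against owner-A")));              // owner-A
    }
  }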

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java
    index 26c8e55..fd6234a 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/Scheduler.java
    @@ -44,4 +44,6 @@ public interface Scheduler<T> {
        void killFragment(String fragmentId);

        Set<String> getExecutorsStatus();
    +
    + QueryIdentifier findQueryByFragment(String fragmentId);
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
    index f621af2..1933eb1 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
    @@ -420,6 +420,15 @@ public class TaskExecutorService extends AbstractService implements Scheduler<Ta
        }

        @Override
    + public QueryIdentifier findQueryByFragment(String fragmentId) {
    + synchronized (lock) {
    + TaskWrapper taskWrapper = knownTasks.get(fragmentId);
    + return taskWrapper == null ? null : taskWrapper.getTaskRunnerCallable()
    + .getFragmentInfo().getQueryInfo().getQueryIdentifier();
    + }
    + }
    +
    + @Override
        public void killFragment(String fragmentId) {
          synchronized (lock) {
            TaskWrapper taskWrapper = knownTasks.remove(fragmentId);

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/java/org/apache/hadoop/hive/llap/security/LlapSecurityHelper.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/security/LlapSecurityHelper.java b/llap-server/src/java/org/apache/hadoop/hive/llap/security/LlapSecurityHelper.java
    index 76ba225..f958bc4 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/security/LlapSecurityHelper.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/security/LlapSecurityHelper.java
    @@ -26,6 +26,7 @@ import java.util.concurrent.TimeUnit;

      import javax.net.SocketFactory;

    +import org.apache.commons.lang3.StringUtils;
      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.hive.llap.impl.LlapManagementProtocolClientImpl;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto;
    @@ -81,7 +82,7 @@ public class LlapSecurityHelper implements LlapTokenProvider {
        }

        @Override
    - public Token<LlapTokenIdentifier> getDelegationToken() throws IOException {
    + public Token<LlapTokenIdentifier> getDelegationToken(String appId) throws IOException {
          if (!UserGroupInformation.isSecurityEnabled()) return null;
          if (llapUgi == null) {
            llapUgi = UserGroupInformation.getCurrentUser();
    @@ -98,7 +99,7 @@ public class LlapSecurityHelper implements LlapTokenProvider {
          boolean hasRefreshed = false;
          while (true) {
            try {
    - tokenBytes = getTokenBytes();
    + tokenBytes = getTokenBytes(appId);
              break;
            } catch (InterruptedException ie) {
              throw new RuntimeException(ie);
    @@ -128,7 +129,8 @@ public class LlapSecurityHelper implements LlapTokenProvider {
          return token;
        }

    - private ByteString getTokenBytes() throws InterruptedException, IOException {
    + private ByteString getTokenBytes(
    + final String appId) throws InterruptedException, IOException {
          return llapUgi.doAs(new PrivilegedExceptionAction<ByteString>() {
            @Override
            public ByteString run() throws Exception {
    @@ -138,8 +140,11 @@ public class LlapSecurityHelper implements LlapTokenProvider {
                    clientInstance.getManagementPort(), retryPolicy, socketFactory);
              }
              // Client only connects on the first call, so this has to be done in doAs.
    - GetTokenRequestProto req = GetTokenRequestProto.newBuilder().build();
    - return client.getDelegationToken(null, req).getToken();
    + GetTokenRequestProto.Builder req = GetTokenRequestProto.newBuilder();
    + if (!StringUtils.isBlank(appId)) {
    + req.setAppId(appId);
    + }
    + return client.getDelegationToken(null, req.build()).getToken();
            }
          });
        }

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/java/org/apache/hadoop/hive/llap/security/SecretManager.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/security/SecretManager.java b/llap-server/src/java/org/apache/hadoop/hive/llap/security/SecretManager.java
    index 8c7a539..c54e726 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/security/SecretManager.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/security/SecretManager.java
    @@ -23,6 +23,7 @@ import java.util.regex.Pattern;
      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.hive.conf.HiveConf;
      import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
    +import org.apache.hadoop.hive.llap.DaemonId;
      import org.apache.hadoop.hive.llap.security.LlapTokenIdentifier;
      import org.apache.hadoop.security.UserGroupInformation;
      import org.apache.hadoop.security.token.Token;
    @@ -33,6 +34,7 @@ import org.slf4j.LoggerFactory;

      public class SecretManager extends ZKDelegationTokenSecretManager<LlapTokenIdentifier> {
        private static final Logger LOG = LoggerFactory.getLogger(SecretManager.class);
    +
        public SecretManager(Configuration conf) {
          super(conf);
          checkForZKDTSMBug(conf);
    @@ -82,16 +84,8 @@ public class SecretManager extends ZKDelegationTokenSecretManager<LlapTokenIdent
          return id;
        }

    - private final static Pattern hostsRe = Pattern.compile("[^A-Za-z0-9_-]");
    - private static String deriveZkPath(Configuration conf) throws IOException {
    - String hosts = HiveConf.getTrimmedVar(conf, ConfVars.LLAP_DAEMON_SERVICE_HOSTS);
    - String clusterName = hosts.startsWith("@") ? hosts.substring(1) : hosts;
    - String userName = UserGroupInformation.getCurrentUser().getShortUserName();
    - return hostsRe.matcher(userName + "_" + clusterName).replaceAll("_") ;
    - }
    -
        public static SecretManager createSecretManager(
    - final Configuration conf, String llapPrincipal, String llapKeytab) {
    + final Configuration conf, String llapPrincipal, String llapKeytab, DaemonId daemonId) {
          // Create ZK connection under a separate ugi (if specified) - ZK works in mysterious ways.
          UserGroupInformation zkUgi = null;
          String principal = HiveConf.getVar(conf, ConfVars.LLAP_ZKSM_KERBEROS_PRINCIPAL, llapPrincipal);
    @@ -110,12 +104,7 @@ public class SecretManager extends ZKDelegationTokenSecretManager<LlapTokenIdent
          zkConf.setLong(DelegationTokenManager.RENEW_INTERVAL, tokenLifetime);
          zkConf.set(SecretManager.ZK_DTSM_ZK_KERBEROS_PRINCIPAL, principal);
          zkConf.set(SecretManager.ZK_DTSM_ZK_KERBEROS_KEYTAB, keyTab);
    - String zkPath;
    - try {
    - zkPath = deriveZkPath(conf);
    - } catch (IOException e) {
    - throw new RuntimeException(e);
    - }
    + String zkPath = daemonId.getClusterString();
          LOG.info("Using {} as ZK secret manager path", zkPath);
          zkConf.set(SecretManager.ZK_DTSM_ZNODE_WORKING_PATH, "zkdtsm_" + zkPath);
          setZkConfIfNotSet(zkConf, SecretManager.ZK_DTSM_ZK_AUTH_TYPE, "sasl");
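
With the ZK working path now taken from DaemonId.getClusterString(), every daemon of one LLAP cluster points its ZKDelegationTokenSecretManager at the same znode subtree, so a token minted by any daemon verifies on any other, and the path no longer has to be re-derived from configuration here. A tiny sketch of the resulting path (the cluster string is a made-up example of what DaemonId might produce):

  public class ZkPathExample {
    public static void main(String[] args) {
      String clusterString = "hive_llap0_1462000000000"; // hypothetical DaemonId output
      String workingPath = "zkdtsm_" + clusterString;     // as set on zkConf above
      System.out.println(workingPath);
    }
  }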

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
    index 610f266..dde5be0 100644
    --- a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
    +++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
    @@ -193,7 +193,7 @@ public class MiniLlapCluster extends AbstractService {
          LOG.info("Initializing {} llap instances for MiniLlapCluster with name={}", numInstances, clusterNameTrimmed);
          for (int i = 0 ;i < numInstances ; i++) {
            llapDaemons[i] = new LlapDaemon(conf, numExecutorsPerService, execBytesPerService, llapIoEnabled,
    - ioIsDirect, ioBytesPerService, localDirs, rpcPort, mngPort, shufflePort, webPort);
    + ioIsDirect, ioBytesPerService, localDirs, rpcPort, mngPort, shufflePort, webPort, clusterNameTrimmed);
            llapDaemons[i].init(new Configuration(conf));
          }
          LOG.info("Initialized {} llap instances for MiniLlapCluster with name={}", numInstances, clusterNameTrimmed);

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java
    index 24f4442..c6ba14e 100644
    --- a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java
    +++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java
    @@ -78,7 +78,7 @@ public class TaskExecutorTestHelpers {
          QueryInfo queryInfo =
              new QueryInfo(queryIdentifier, "fake_app_id_string", "fake_dag_name", 1, "fakeUser",
                  new ConcurrentHashMap<String, LlapDaemonProtocolProtos.SourceStateProto>(),
    - new String[0], null);
    + new String[0], null, null, null);
          return queryInfo;
        }


    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapDaemonProtocolServerImpl.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapDaemonProtocolServerImpl.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapDaemonProtocolServerImpl.java
    index a65bf5c..fd37a06 100644
    --- a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapDaemonProtocolServerImpl.java
    +++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapDaemonProtocolServerImpl.java
    @@ -46,7 +46,7 @@ public class TestLlapDaemonProtocolServerImpl {
          LlapProtocolServerImpl server =
              new LlapProtocolServerImpl(numHandlers, containerRunnerMock,
                 new AtomicReference<InetSocketAddress>(), new AtomicReference<InetSocketAddress>(),
    - rpcPort, rpcPort + 1);
    + rpcPort, rpcPort + 1, null);
          when(containerRunnerMock.submitWork(any(SubmitWorkRequestProto.class))).thenReturn(
              SubmitWorkResponseProto
                  .newBuilder()

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapTokenChecker.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapTokenChecker.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapTokenChecker.java
    new file mode 100644
    index 0000000..aaaa762
    --- /dev/null
    +++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TestLlapTokenChecker.java
    @@ -0,0 +1,96 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.hadoop.hive.llap.daemon.impl;
    +
    +import static org.junit.Assert.*;
    +
    +import org.apache.hadoop.io.Text;
    +
    +import org.apache.commons.lang3.tuple.Pair;
    +import org.apache.hadoop.hive.llap.security.LlapTokenIdentifier;
    +
    +import java.util.ArrayList;
    +import java.util.List;
    +
    +import org.junit.Test;
    +
    +public class TestLlapTokenChecker {
    +
    + @Test
    + public void testGetToken() {
    + check(LlapTokenChecker.getTokenInfoInternal("u", null), "u", null);
    + check(LlapTokenChecker.getTokenInfoInternal(null, createTokens("u", null)), "u", null);
    + check(LlapTokenChecker.getTokenInfoInternal(null, createTokens("u", "a")), "u", "a");
    + check(LlapTokenChecker.getTokenInfoInternal("u", createTokens("u", "a")), "u", "a");
    + check(LlapTokenChecker.getTokenInfoInternal("u", createTokens("u", "a", "u", null)),
    + "u", "a");
    + // Note - some of these scenarios could be handled, but they are not supported right now.
    + // The reason is that we bind a query to app/user using the signed token information, and
    + // we don't want to bother figuring out which one to use in case of ambiguity w/o a use case.
    + checkGetThrows("u", createTokens("u2", "a")); // Ambiguous user.
    + checkGetThrows("u2", createTokens("u2", "a", "u3", "a")); // Ambiguous user.
    + checkGetThrows(null, createTokens("u2", "a", "u3", "a")); // Ambiguous user.
    + checkGetThrows(null, createTokens("u2", "a", "u2", "a1")); // Ambiguous app.
    + }
    +
    + @Test
    + public void testCheckPermissions() {
    + LlapTokenChecker.checkPermissionsInternal("u", null, "u", null, null);
    + LlapTokenChecker.checkPermissionsInternal(null, createTokens("u", null) , "u", null, null);
    + LlapTokenChecker.checkPermissionsInternal("u", createTokens("u", "a") , "u", "a", null);
    + // No access.
    + checkPrmThrows("u2", null, "u", "a");
    + checkPrmThrows("u", null, "u", "a"); // Note - Kerberos user w/o appId doesn't have access.
    + checkPrmThrows(null, createTokens("u2", "a"), "u", "a");
    + checkPrmThrows(null, createTokens("u", "a2"), "u", "a");
    + checkPrmThrows(null, createTokens("u", null), "u", "a");
    + }
    +
    + private List<LlapTokenIdentifier> createTokens(String... args) {
    + List<LlapTokenIdentifier> tokens = new ArrayList<>();
    + for (int i = 0; i < args.length; i += 2) {
    + tokens.add(new LlapTokenIdentifier(null, null, new Text(args[i]), "c", args[i + 1]));
    + }
    + return tokens;
    + }
    +
    + private void checkGetThrows(String kerberosName, List<LlapTokenIdentifier> tokens) {
    + try {
    + LlapTokenChecker.getTokenInfoInternal(kerberosName, tokens);
    + fail("Didn't throw");
    + } catch (SecurityException ex) {
    + // Expected.
    + }
    + }
    +
    + private void checkPrmThrows(
    + String kerberosName, List<LlapTokenIdentifier> tokens, String userName, String appId) {
    + try {
    + LlapTokenChecker.checkPermissionsInternal(kerberosName, tokens, userName, appId, null);
    + fail("Didn't throw");
    + } catch (SecurityException ex) {
    + // Expected.
    + }
    + }
    +
    + private void check(Pair<String, String> p, String user, String appId) {
    + assertEquals(user, p.getLeft());
    + assertEquals(appId, p.getRight());
    + }
    +}
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/868e5e14/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
    index 3ea5ef9..fd6465a 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
    @@ -275,7 +275,8 @@ public class TezSessionState {
          if (llapMode) {
            if (UserGroupInformation.isSecurityEnabled()) {
              LlapTokenProvider tp = LlapProxy.getOrInitTokenProvider(conf);
    - Token<LlapTokenIdentifier> token = tp.getDelegationToken();
    + // For Tez, we don't use appId to distinguish the tokens; security scope is the user.
    + Token<LlapTokenIdentifier> token = tp.getDelegationToken(null);
              if (LOG.isInfoEnabled()) {
                LOG.info("Obtained a LLAP token: " + token);
              }
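
For Tez the token stays user-scoped, so null is passed; an external client that wants per-application isolation would pass its own application ID instead. A two-line contrast (the app-scoped call is an assumption about other callers, not part of this patch):

  Token<LlapTokenIdentifier> userScoped = tp.getDelegationToken(null);                              // Tez, as above
  Token<LlapTokenIdentifier> appScoped = tp.getDelegationToken("application_1462000000000_0001");   // hypothetical external caller
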
  • Spena at May 6, 2016 at 8:42 pm
    HIVE-13653 : improve config error messages for LLAP cache size/etc (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f41d693b
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f41d693b
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f41d693b

    Branch: refs/heads/java8
    Commit: f41d693b5b984ea55b01394af0dbb6c7121db90a
    Parents: 96f2dc7
    Author: Sergey Shelukhin <sershe@apache.org>
    Authored: Thu May 5 10:41:47 2016 -0700
    Committer: Sergey Shelukhin <sershe@apache.org>
    Committed: Thu May 5 10:41:47 2016 -0700

    ----------------------------------------------------------------------
      .../hadoop/hive/llap/cache/BuddyAllocator.java | 43 +++++++++++++++-----
      1 file changed, 32 insertions(+), 11 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/f41d693b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
    index d78c1e0..1d5a7db 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
    @@ -44,6 +44,8 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
        // We don't know the acceptable size for Java array, so we'll use 1Gb boundary.
        // That is guaranteed to fit any maximum allocation.
        private static final int MAX_ARENA_SIZE = 1024*1024*1024;
    + // Don't try to operate with less than MIN_TOTAL_MEMORY_SIZE of allocator space; it will just cause grief.
    + private static final int MIN_TOTAL_MEMORY_SIZE = 64*1024*1024;


        public BuddyAllocator(Configuration conf, MemoryManager mm, LlapDaemonCacheMetrics metrics) {
    @@ -51,8 +53,19 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
              (int)HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC),
              (int)HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MAX_ALLOC),
              HiveConf.getIntVar(conf, ConfVars.LLAP_ALLOCATOR_ARENA_COUNT),
    - HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE),
    - mm, metrics);
    + getMaxTotalMemorySize(conf), mm, metrics);
    + }
    +
    + private static long getMaxTotalMemorySize(Configuration conf) {
    + long maxSize = HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
    + if (maxSize > MIN_TOTAL_MEMORY_SIZE || HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) {
    + return maxSize;
    + }
    + throw new RuntimeException("Allocator space is too small for reasonable operation; "
    + + ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname + "=" + maxSize + ", but at least "
    + + MIN_TOTAL_MEMORY_SIZE + " is required. If you cannot spare any memory, you can "
    + + "disable LLAP IO entirely via " + ConfVars.LLAP_IO_ENABLED.varname + "; or set "
    + + ConfVars.LLAP_IO_MEMORY_MODE.varname + " to 'none'");
        }

        @VisibleForTesting
    @@ -69,16 +82,19 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
                + ", arena size " + arenaSizeVal + ". total size " + maxSizeVal);
          }

    + String minName = ConfVars.LLAP_ALLOCATOR_MIN_ALLOC.varname,
    + maxName = ConfVars.LLAP_ALLOCATOR_MAX_ALLOC.varname;
          if (minAllocation < 8) {
    - throw new AssertionError("Min allocation must be at least 8 bytes: " + minAllocation);
    + throw new RuntimeException(minName + " must be at least 8 bytes: " + minAllocation);
          }
    - if (maxSizeVal < arenaSizeVal || maxAllocation < minAllocation) {
    - throw new AssertionError("Inconsistent sizes of cache, arena and allocations: "
    - + minAllocation + ", " + maxAllocation + ", " + arenaSizeVal + ", " + maxSizeVal);
    + if (maxSizeVal < maxAllocation || maxAllocation < minAllocation) {
    + throw new RuntimeException("Inconsistent sizes; expecting " + minName + " <= " + maxName
    + + " <= " + ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname + "; configured with min="
    + + minAllocation + ", max=" + maxAllocation + " and total=" + maxSizeVal);
          }
          if ((Integer.bitCount(minAllocation) != 1) || (Integer.bitCount(maxAllocation) != 1)) {
    - throw new AssertionError("Allocation sizes must be powers of two: "
    - + minAllocation + ", " + maxAllocation);
    + throw new RuntimeException("Allocation sizes must be powers of two; configured with "
    + + minName + "=" + minAllocation + ", " + maxName + "=" + maxAllocation);
          }
          if ((arenaSizeVal % maxAllocation) > 0) {
            long oldArenaSize = arenaSizeVal;
    @@ -94,8 +110,8 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
                + " to be divisible by arena size " + arenaSize);
          }
          if ((maxSizeVal / arenaSize) > Integer.MAX_VALUE) {
    - throw new AssertionError(
    - "Too many arenas needed to allocate the cache: " + arenaSize + "," + maxSizeVal);
    + throw new RuntimeException(
    + "Too many arenas needed to allocate the cache: " + arenaSize + ", " + maxSizeVal);
          }
          maxSize = maxSizeVal;
          memoryManager.updateMaxSize(maxSize);
    @@ -280,7 +296,12 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
          private FreeList[] freeLists;

          void init() {
    - data = isDirect ? ByteBuffer.allocateDirect(arenaSize) : ByteBuffer.allocate(arenaSize);
    + try {
    + data = isDirect ? ByteBuffer.allocateDirect(arenaSize) : ByteBuffer.allocate(arenaSize);
    + } catch (OutOfMemoryError oom) {
    + throw new OutOfMemoryError("Cannot allocate " + arenaSize + " bytes: " + oom.getMessage()
    + + "; make sure your xmx and process size are set correctly.");
    + }
            int maxMinAllocs = 1 << (arenaSizeLog2 - minAllocLog2);
            headers = new byte[maxMinAllocs];
            int allocLog2Diff = maxAllocLog2 - minAllocLog2, freeListCount = allocLog2Diff + 1;
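
    In short, the constructor now rejects inconsistent sizing up front and names the offending configuration properties: the minimum allocation must be at least 8 bytes, both allocation sizes must be powers of two, min <= max <= total must hold, and the total must be at least 64MB unless ConfVars.HIVE_IN_TEST is set (otherwise the message suggests disabling LLAP IO entirely or setting the memory mode to 'none'). A minimal sketch of a configuration that satisfies all of these checks, assuming HiveConf's usual size-suffix parsing; the concrete values are illustrative, not recommendations:

      import org.apache.hadoop.hive.conf.HiveConf;
      import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

      public class LlapCacheSizingSketch {
        public static void main(String[] args) {
          HiveConf conf = new HiveConf();
          // Both allocation sizes are powers of two, min <= max <= total, and the total stays
          // above the new 64MB floor, so none of the RuntimeExceptions introduced above fire.
          conf.set(ConfVars.LLAP_ALLOCATOR_MIN_ALLOC.varname, "256Kb");
          conf.set(ConfVars.LLAP_ALLOCATOR_MAX_ALLOC.varname, "16Mb");
          conf.set(ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname, "1Gb");
        }
      }
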
  • Spena at May 6, 2016 at 8:42 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/0b5c27fd/llap-common/src/java/org/apache/hadoop/hive/llap/tez/Converters.java
    ----------------------------------------------------------------------
    diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/tez/Converters.java b/llap-common/src/java/org/apache/hadoop/hive/llap/tez/Converters.java
    index ec6e439..e43b72b 100644
    --- a/llap-common/src/java/org/apache/hadoop/hive/llap/tez/Converters.java
    +++ b/llap-common/src/java/org/apache/hadoop/hive/llap/tez/Converters.java
    @@ -22,9 +22,11 @@ import com.google.protobuf.ByteString;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto;
    -import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier;
    +import org.apache.hadoop.yarn.util.ConverterUtils;
      import org.apache.tez.common.TezCommonUtils;
      import org.apache.tez.dag.api.EntityDescriptor;
      import org.apache.tez.dag.api.InputDescriptor;
    @@ -33,7 +35,10 @@ import org.apache.tez.dag.api.ProcessorDescriptor;
      import org.apache.tez.dag.api.TezUncheckedException;
      import org.apache.tez.dag.api.UserPayload;
      import org.apache.tez.dag.api.event.VertexState;
    +import org.apache.tez.dag.records.TezDAGID;
      import org.apache.tez.dag.records.TezTaskAttemptID;
    +import org.apache.tez.dag.records.TezTaskID;
    +import org.apache.tez.dag.records.TezVertexID;
      import org.apache.tez.runtime.api.impl.GroupInputSpec;
      import org.apache.tez.runtime.api.impl.InputSpec;
      import org.apache.tez.runtime.api.impl.OutputSpec;
    @@ -41,55 +46,88 @@ import org.apache.tez.runtime.api.impl.TaskSpec;

      public class Converters {

    - public static TaskSpec getTaskSpecfromProto(FragmentSpecProto FragmentSpecProto) {
    - TezTaskAttemptID taskAttemptID =
    - TezTaskAttemptID.fromString(FragmentSpecProto.getFragmentIdentifierString());
    + public static TaskSpec getTaskSpecfromProto(SignableVertexSpec vectorProto,
    + int fragmentNum, int attemptNum, TezTaskAttemptID attemptId) {
    + VertexIdentifier vertexId = vectorProto.getVertexIdentifier();
    + TezTaskAttemptID taskAttemptID = attemptId != null ? attemptId
    + : createTaskAttemptId(vertexId, fragmentNum, attemptNum);

          ProcessorDescriptor processorDescriptor = null;
    - if (FragmentSpecProto.hasProcessorDescriptor()) {
    + if (vectorProto.hasProcessorDescriptor()) {
            processorDescriptor = convertProcessorDescriptorFromProto(
    - FragmentSpecProto.getProcessorDescriptor());
    + vectorProto.getProcessorDescriptor());
          }

    - List<InputSpec> inputSpecList = new ArrayList<InputSpec>(FragmentSpecProto.getInputSpecsCount());
    - if (FragmentSpecProto.getInputSpecsCount() > 0) {
    - for (IOSpecProto inputSpecProto : FragmentSpecProto.getInputSpecsList()) {
    + List<InputSpec> inputSpecList = new ArrayList<InputSpec>(vectorProto.getInputSpecsCount());
    + if (vectorProto.getInputSpecsCount() > 0) {
    + for (IOSpecProto inputSpecProto : vectorProto.getInputSpecsList()) {
              inputSpecList.add(getInputSpecFromProto(inputSpecProto));
            }
          }

          List<OutputSpec> outputSpecList =
    - new ArrayList<OutputSpec>(FragmentSpecProto.getOutputSpecsCount());
    - if (FragmentSpecProto.getOutputSpecsCount() > 0) {
    - for (IOSpecProto outputSpecProto : FragmentSpecProto.getOutputSpecsList()) {
    + new ArrayList<OutputSpec>(vectorProto.getOutputSpecsCount());
    + if (vectorProto.getOutputSpecsCount() > 0) {
    + for (IOSpecProto outputSpecProto : vectorProto.getOutputSpecsList()) {
              outputSpecList.add(getOutputSpecFromProto(outputSpecProto));
            }
          }

          List<GroupInputSpec> groupInputSpecs =
    - new ArrayList<GroupInputSpec>(FragmentSpecProto.getGroupedInputSpecsCount());
    - if (FragmentSpecProto.getGroupedInputSpecsCount() > 0) {
    - for (GroupInputSpecProto groupInputSpecProto : FragmentSpecProto.getGroupedInputSpecsList()) {
    + new ArrayList<GroupInputSpec>(vectorProto.getGroupedInputSpecsCount());
    + if (vectorProto.getGroupedInputSpecsCount() > 0) {
    + for (GroupInputSpecProto groupInputSpecProto : vectorProto.getGroupedInputSpecsList()) {
              groupInputSpecs.add(getGroupInputSpecFromProto(groupInputSpecProto));
            }
          }

          TaskSpec taskSpec =
    - new TaskSpec(taskAttemptID, FragmentSpecProto.getDagName(), FragmentSpecProto.getVertexName(),
    - FragmentSpecProto.getVertexParallelism(), processorDescriptor, inputSpecList,
    + new TaskSpec(taskAttemptID, vectorProto.getDagName(), vectorProto.getVertexName(),
    + vectorProto.getVertexParallelism(), processorDescriptor, inputSpecList,
                  outputSpecList, groupInputSpecs);
          return taskSpec;
        }

    - public static FragmentSpecProto convertTaskSpecToProto(TaskSpec taskSpec) {
    - FragmentSpecProto.Builder builder = FragmentSpecProto.newBuilder();
    - builder.setFragmentIdentifierString(taskSpec.getTaskAttemptID().toString());
    + public static TezTaskAttemptID createTaskAttemptId(
    + VertexIdentifier vertexId, int fragmentNum, int attemptNum) {
    + // Come ride the API roller-coaster!
    + return TezTaskAttemptID.getInstance(
    + TezTaskID.getInstance(
    + TezVertexID.getInstance(
    + TezDAGID.getInstance(
    + ConverterUtils.toApplicationId(
    + vertexId.getApplicationIdString()),
    + vertexId.getDagId()),
    + vertexId.getVertexId()),
    + fragmentNum),
    + attemptNum);
    + }
    +
    + public static VertexIdentifier createVertexIdentifier(
    + TezTaskAttemptID taId, int appAttemptId) {
    + VertexIdentifier.Builder idBuilder = VertexIdentifier.newBuilder();
    + idBuilder.setApplicationIdString(
    + taId.getTaskID().getVertexID().getDAGId().getApplicationId().toString());
    + idBuilder.setAppAttemptNumber(appAttemptId);
    + idBuilder.setDagId(taId.getTaskID().getVertexID().getDAGId().getId());
    + idBuilder.setVertexId(taId.getTaskID().getVertexID().getId());
    + return idBuilder.build();
    + }
    +
    + public static SignableVertexSpec convertTaskSpecToProto(TaskSpec taskSpec,
    + int appAttemptId, String tokenIdentifier, Integer signatureKeyId, String user) {
    + TezTaskAttemptID tId = taskSpec.getTaskAttemptID();
    +
    + SignableVertexSpec.Builder builder = SignableVertexSpec.newBuilder();
    + builder.setVertexIdentifier(createVertexIdentifier(tId, appAttemptId));
          builder.setDagName(taskSpec.getDAGName());
    - builder.setDagId(taskSpec.getDagIdentifier());
          builder.setVertexName(taskSpec.getVertexName());
          builder.setVertexParallelism(taskSpec.getVertexParallelism());
    - builder.setFragmentNumber(taskSpec.getTaskAttemptID().getTaskID().getId());
    - builder.setAttemptNumber(taskSpec.getTaskAttemptID().getId());
    + builder.setTokenIdentifier(tokenIdentifier);
    + builder.setUser(user);
    + if (signatureKeyId != null) {
    + builder.setSignatureKeyId(signatureKeyId);
    + }

          if (taskSpec.getProcessorDescriptor() != null) {
            builder.setProcessorDescriptor(

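    With the identifier split out of the signable payload, the fragment and attempt numbers travel separately and are folded back into a Tez attempt id when needed. A minimal, self-contained sketch of that path, using only the VertexIdentifier builder fields and the createTaskAttemptId signature introduced here; all identifier values are placeholders:

      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier;
      import org.apache.hadoop.hive.llap.tez.Converters;
      import org.apache.tez.dag.records.TezTaskAttemptID;

      public class VertexIdSketch {
        public static void main(String[] args) {
          // Placeholder identifiers in the standard YARN application id format.
          VertexIdentifier vId = VertexIdentifier.newBuilder()
              .setApplicationIdString("application_1462000000000_0001")
              .setAppAttemptNumber(0)
              .setDagId(1)
              .setVertexId(2)
              .build();
          // Fragment/attempt numbers are no longer part of the vertex spec; the caller supplies
          // them and the daemon side rebuilds the Tez attempt id from the pieces.
          TezTaskAttemptID attemptId = Converters.createTaskAttemptId(vId, 3, 0);
          System.out.println(attemptId);
        }
      }
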
    http://git-wip-us.apache.org/repos/asf/hive/blob/0b5c27fd/llap-common/src/protobuf/LlapDaemonProtocol.proto
    ----------------------------------------------------------------------
    diff --git a/llap-common/src/protobuf/LlapDaemonProtocol.proto b/llap-common/src/protobuf/LlapDaemonProtocol.proto
    index 5cdc02e..486ba0a 100644
    --- a/llap-common/src/protobuf/LlapDaemonProtocol.proto
    +++ b/llap-common/src/protobuf/LlapDaemonProtocol.proto
    @@ -46,19 +46,38 @@ message GroupInputSpecProto {
        optional EntityDescriptorProto merged_input_descriptor = 3;
      }

    +message VertexIdentifier {
    + optional string application_id_string = 1;
    + optional int32 app_attempt_number = 2;
    + optional int32 dag_id = 3;
    + optional int32 vertex_id = 4;
    +}
    +
    +// The part of SubmitWork that can be signed
    +message SignableVertexSpec
    +{
    + optional string user = 1;
    + optional int64 signatureKeyId = 2;
    +
    + optional VertexIdentifier vertexIdentifier = 3;
    + // Display names cannot be modified by the client for now. If needed, they should be sent to HS2, which will put them here.
    + optional string dag_name = 4;
    + optional string vertex_name = 5;
    +
    + // The core vertex stuff
    + optional string token_identifier = 6;
    + optional EntityDescriptorProto processor_descriptor = 7;
    + repeated IOSpecProto input_specs = 8;
    + repeated IOSpecProto output_specs = 9;
    + repeated GroupInputSpecProto grouped_input_specs = 10;
    +
    + optional int32 vertex_parallelism = 11; // An internal field required for Tez.
    +}

    -message FragmentSpecProto {
    - optional string fragment_identifier_string = 1;
    - optional string dag_name = 2;
    - optional int32 dag_id = 11;
    - optional string vertex_name = 3;
    - optional EntityDescriptorProto processor_descriptor = 4;
    - repeated IOSpecProto input_specs = 5;
    - repeated IOSpecProto output_specs = 6;
    - repeated GroupInputSpecProto grouped_input_specs = 7;
    - optional int32 vertex_parallelism = 8;
    - optional int32 fragment_number =9;
    - optional int32 attempt_number = 10;
    +// Union
    +message VertexOrBinary {
    + optional SignableVertexSpec vertex = 1;
    + optional bytes vertexBinary = 2; // SignableVertexSpec
      }

      message FragmentRuntimeInfo {
    @@ -81,18 +100,24 @@ message QueryIdentifierProto {
      }

      message SubmitWorkRequestProto {
    - optional string container_id_string = 1;
    - optional string am_host = 2;
    - optional int32 am_port = 3;
    - optional string token_identifier = 4;
    - optional bytes credentials_binary = 5;
    - optional string user = 6;
    - optional string application_id_string = 7;
    - optional int32 app_attempt_number = 8;
    - optional FragmentSpecProto fragment_spec = 9;
    - optional FragmentRuntimeInfo fragment_runtime_info = 10;
    + optional VertexOrBinary work_spec = 1;
    + optional bytes work_spec_signature = 2;
    +
    + optional int32 fragment_number = 3;
    + optional int32 attempt_number = 4;
    +
    + optional string container_id_string = 5;
    + optional string am_host = 6;
    + optional int32 am_port = 7;
    +
    + // Credentials are not signed; the client can add, e.g., its own HDFS tokens.
    + optional bytes credentials_binary = 8;
    +
    + // Not supported/honored for external clients right now.
    + optional FragmentRuntimeInfo fragment_runtime_info = 9;
      }

    +
      enum SubmissionStateProto {
        ACCEPTED = 1;
        REJECTED = 2;

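    The net effect of the protocol change is a split between what gets signed (SignableVertexSpec, wrapped in the VertexOrBinary union as either a message or pre-serialized bytes) and the per-fragment and transport-level fields that remain on SubmitWorkRequestProto. A minimal sketch of the new request shape, assuming the standard protobuf-generated builders; every field value below is a placeholder:

      import com.google.protobuf.ByteString;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary;

      public class SubmitWorkShapeSketch {
        public static void main(String[] args) {
          // In practice the vertex spec comes from Converters.convertTaskSpecToProto(...).
          SignableVertexSpec vertex = SignableVertexSpec.newBuilder()
              .setUser("hive")
              .setDagName("dagName")
              .setVertexName("Map 1")
              .build();

          SubmitWorkRequestProto request = SubmitWorkRequestProto.newBuilder()
              .setWorkSpec(VertexOrBinary.newBuilder().setVertex(vertex))
              .setWorkSpecSignature(ByteString.EMPTY) // placeholder; a real signature covers the vertex bytes
              .setFragmentNumber(0)   // fragment/attempt numbers now live outside the signed spec
              .setAttemptNumber(0)
              .setAmHost("localhost")
              .setAmPort(12345)
              .build();
          System.out.println(request);
        }
      }
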
    http://git-wip-us.apache.org/repos/asf/hive/blob/0b5c27fd/llap-common/src/test/org/apache/hadoop/hive/llap/tez/TestConverters.java
    ----------------------------------------------------------------------
    diff --git a/llap-common/src/test/org/apache/hadoop/hive/llap/tez/TestConverters.java b/llap-common/src/test/org/apache/hadoop/hive/llap/tez/TestConverters.java
    index d4cdac1..349ee14 100644
    --- a/llap-common/src/test/org/apache/hadoop/hive/llap/tez/TestConverters.java
    +++ b/llap-common/src/test/org/apache/hadoop/hive/llap/tez/TestConverters.java
    @@ -23,8 +23,8 @@ import java.util.List;
      import com.google.common.collect.Lists;
      import com.google.protobuf.ByteString;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto;
    -import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto;
      import org.apache.hadoop.yarn.api.records.ApplicationId;
      import org.apache.tez.dag.api.InputDescriptor;
    @@ -77,28 +77,24 @@ public class TestConverters {
              new TaskSpec(tezTaskAttemptId, "dagName", "vertexName", 10, processorDescriptor,
                  inputSpecList, outputSpecList, null);

    + SignableVertexSpec vertexProto = Converters.convertTaskSpecToProto(taskSpec, 0, "", null, "");

    - FragmentSpecProto fragmentSpecProto = Converters.convertTaskSpecToProto(taskSpec);
    -
    -
    - assertEquals("dagName", fragmentSpecProto.getDagName());
    - assertEquals("vertexName", fragmentSpecProto.getVertexName());
    - assertEquals(tezTaskAttemptId.toString(), fragmentSpecProto.getFragmentIdentifierString());
    - assertEquals(tezDagId.getId(), fragmentSpecProto.getDagId());
    - assertEquals(tezTaskAttemptId.getId(), fragmentSpecProto.getAttemptNumber());
    - assertEquals(tezTaskId.getId(), fragmentSpecProto.getFragmentNumber());
    + assertEquals("dagName", vertexProto.getDagName());
    + assertEquals("vertexName", vertexProto.getVertexName());
    + assertEquals(appId.toString(), vertexProto.getVertexIdentifier().getApplicationIdString());
    + assertEquals(tezDagId.getId(), vertexProto.getVertexIdentifier().getDagId());
          assertEquals(processorDescriptor.getClassName(),
    - fragmentSpecProto.getProcessorDescriptor().getClassName());
    + vertexProto.getProcessorDescriptor().getClassName());
          assertEquals(processorDescriptor.getUserPayload().getPayload(),
    - fragmentSpecProto.getProcessorDescriptor().getUserPayload().getUserPayload()
    + vertexProto.getProcessorDescriptor().getUserPayload().getUserPayload()
                  .asReadOnlyByteBuffer());
    - assertEquals(2, fragmentSpecProto.getInputSpecsCount());
    - assertEquals(2, fragmentSpecProto.getOutputSpecsCount());
    + assertEquals(2, vertexProto.getInputSpecsCount());
    + assertEquals(2, vertexProto.getOutputSpecsCount());

    - verifyInputSpecAndProto(inputSpec1, fragmentSpecProto.getInputSpecs(0));
    - verifyInputSpecAndProto(inputSpec2, fragmentSpecProto.getInputSpecs(1));
    - verifyOutputSpecAndProto(outputSpec1, fragmentSpecProto.getOutputSpecs(0));
    - verifyOutputSpecAndProto(outputSpec2, fragmentSpecProto.getOutputSpecs(1));
    + verifyInputSpecAndProto(inputSpec1, vertexProto.getInputSpecs(0));
    + verifyInputSpecAndProto(inputSpec2, vertexProto.getInputSpecs(1));
    + verifyOutputSpecAndProto(outputSpec1, vertexProto.getOutputSpecs(0));
    + verifyOutputSpecAndProto(outputSpec2, vertexProto.getOutputSpecs(1));

        }

    @@ -120,11 +116,10 @@ public class TestConverters {
          TezTaskID tezTaskId = TezTaskID.getInstance(tezVertexId, 500);
          TezTaskAttemptID tezTaskAttemptId = TezTaskAttemptID.getInstance(tezTaskId, 600);

    - FragmentSpecProto.Builder builder = FragmentSpecProto.newBuilder();
    - builder.setFragmentIdentifierString(tezTaskAttemptId.toString());
    + SignableVertexSpec.Builder builder = SignableVertexSpec.newBuilder();
    + builder.setVertexIdentifier(Converters.createVertexIdentifier(tezTaskAttemptId, 0));
          builder.setDagName("dagName");
          builder.setVertexName("vertexName");
    - builder.setDagId(tezDagId.getId());
          builder.setProcessorDescriptor(
              EntityDescriptorProto.newBuilder().setClassName("fakeProcessorName").setUserPayload(
                  UserPayloadProto.newBuilder().setUserPayload(ByteString.copyFrom(procBb))));
    @@ -145,9 +140,9 @@ public class TestConverters {
                  EntityDescriptorProto.newBuilder().setClassName("outputClassName").setUserPayload(
                      UserPayloadProto.newBuilder().setUserPayload(ByteString.copyFrom(output1Bb)))));

    - FragmentSpecProto fragmentSpecProto = builder.build();
    + SignableVertexSpec vertexProto = builder.build();

    - TaskSpec taskSpec = Converters.getTaskSpecfromProto(fragmentSpecProto);
    + TaskSpec taskSpec = Converters.getTaskSpecfromProto(vertexProto, 0, 0, null);

          assertEquals("dagName", taskSpec.getDAGName());
          assertEquals("vertexName", taskSpec.getVertexName());
    @@ -160,12 +155,10 @@ public class TestConverters {
          assertEquals(2, taskSpec.getInputs().size());
          assertEquals(2, taskSpec.getOutputs().size());

    - verifyInputSpecAndProto(taskSpec.getInputs().get(0), fragmentSpecProto.getInputSpecs(0));
    - verifyInputSpecAndProto(taskSpec.getInputs().get(1), fragmentSpecProto.getInputSpecs(1));
    - verifyOutputSpecAndProto(taskSpec.getOutputs().get(0), fragmentSpecProto.getOutputSpecs(0));
    - verifyOutputSpecAndProto(taskSpec.getOutputs().get(1), fragmentSpecProto.getOutputSpecs(1));
    -
    -
    + verifyInputSpecAndProto(taskSpec.getInputs().get(0), vertexProto.getInputSpecs(0));
    + verifyInputSpecAndProto(taskSpec.getInputs().get(1), vertexProto.getInputSpecs(1));
    + verifyOutputSpecAndProto(taskSpec.getOutputs().get(0), vertexProto.getOutputSpecs(0));
    + verifyOutputSpecAndProto(taskSpec.getOutputs().get(1), vertexProto.getOutputSpecs(1));
        }

        private void verifyInputSpecAndProto(InputSpec inputSpec,

    http://git-wip-us.apache.org/repos/asf/hive/blob/0b5c27fd/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    index 78b37f7..2bfe3ed 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    @@ -33,11 +33,11 @@ import org.apache.hadoop.hive.llap.daemon.HistoryLogger;
      import org.apache.hadoop.hive.llap.daemon.KilledTaskHandler;
      import org.apache.hadoop.hive.llap.daemon.QueryFailedHandler;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo;
    -import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto;
    @@ -45,7 +45,9 @@ import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWor
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier;
      import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
    +import org.apache.hadoop.hive.llap.tez.Converters;
      import org.apache.hadoop.hive.ql.exec.tez.TezProcessor;
      import org.apache.hadoop.io.DataInputBuffer;
      import org.apache.hadoop.security.Credentials;
    @@ -151,32 +153,35 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu

        @Override
        public SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws IOException {
    - HistoryLogger.logFragmentStart(request.getApplicationIdString(), request.getContainerIdString(),
    - localAddress.get().getHostName(), request.getFragmentSpec().getDagName(), request.getFragmentSpec().getDagId(),
    - request.getFragmentSpec().getVertexName(), request.getFragmentSpec().getFragmentNumber(),
    - request.getFragmentSpec().getAttemptNumber());
    + // TODO: also support binary. Actually, we should figure out the binary stuff here and
    + // stop passing the protobuf around. We should pass around some plain objects/values.
    + SignableVertexSpec vertex = request.getWorkSpec().getVertex();
          if (LOG.isInfoEnabled()) {
    - LOG.info("Queueing container for execution: " + stringifySubmitRequest(request));
    + LOG.info("Queueing container for execution: " + stringifySubmitRequest(request, vertex));
          }
    + VertexIdentifier vId = vertex.getVertexIdentifier();
    + TezTaskAttemptID attemptId = Converters.createTaskAttemptId(
    + vId, request.getFragmentNumber(), request.getAttemptNumber());
    + String fragmentIdString = attemptId.toString();
    + HistoryLogger.logFragmentStart(vId.getApplicationIdString(), request.getContainerIdString(),
    + localAddress.get().getHostName(), vertex.getDagName(), vId.getDagId(),
    + vertex.getVertexName(), request.getFragmentNumber(), request.getAttemptNumber());
          // This is the start of container-annotated logging.
          // TODO Reduce the length of this string. Way too verbose at the moment.
    - String ndcContextString = request.getFragmentSpec().getFragmentIdentifierString();
    - NDC.push(ndcContextString);
    + NDC.push(fragmentIdString);
          Scheduler.SubmissionState submissionState;
          SubmitWorkResponseProto.Builder responseBuilder = SubmitWorkResponseProto.newBuilder();
          try {
            Map<String, String> env = new HashMap<>();
            // TODO What else is required in this environment map.
            env.putAll(localEnv);
    - env.put(ApplicationConstants.Environment.USER.name(), request.getUser());
    + env.put(ApplicationConstants.Environment.USER.name(), vertex.getUser());

    - FragmentSpecProto fragmentSpec = request.getFragmentSpec();
    - TezTaskAttemptID taskAttemptId = TezTaskAttemptID.fromString(
    - fragmentSpec.getFragmentIdentifierString());
    + TezTaskAttemptID taskAttemptId = TezTaskAttemptID.fromString(fragmentIdString);
            int dagIdentifier = taskAttemptId.getTaskID().getVertexID().getDAGId().getId();

            QueryIdentifier queryIdentifier = new QueryIdentifier(
    - request.getApplicationIdString(), dagIdentifier);
    + vId.getApplicationIdString(), dagIdentifier);

            Credentials credentials = new Credentials();
            DataInputBuffer dib = new DataInputBuffer();
    @@ -186,14 +191,10 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu

            Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);

    - QueryFragmentInfo fragmentInfo = queryTracker
    - .registerFragment(queryIdentifier, request.getApplicationIdString(),
    - fragmentSpec.getDagName(),
    - dagIdentifier,
    - fragmentSpec.getVertexName(), fragmentSpec.getFragmentNumber(),
    - fragmentSpec.getAttemptNumber(), request.getUser(), request.getFragmentSpec(),
    - jobToken);
    -
    + QueryFragmentInfo fragmentInfo = queryTracker.registerFragment(
    + queryIdentifier, vId.getApplicationIdString(), vertex.getDagName(), dagIdentifier,
    + vertex.getVertexName(), request.getFragmentNumber(), request.getAttemptNumber(),
    + vertex.getUser(), vertex, jobToken, fragmentIdString);

            String[] localDirs = fragmentInfo.getLocalDirs();
            Preconditions.checkNotNull(localDirs);
    @@ -202,14 +203,16 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
            }
            // May need to setup localDir for re-localization, which is usually setup as Environment.PWD.
            // Used for re-localization, to add the user specified configuration (conf_pb_binary_stream)
    - TaskRunnerCallable callable = new TaskRunnerCallable(request, fragmentInfo, new Configuration(getConfig()),
    +
    + Configuration callableConf = new Configuration(getConfig());
    + TaskRunnerCallable callable = new TaskRunnerCallable(request, fragmentInfo, callableConf,
                new LlapExecutionContext(localAddress.get().getHostName(), queryTracker), env,
                credentials, memoryPerExecutor, amReporter, confParams, metrics, killedTaskHandler,
    - this, tezHadoopShim);
    + this, tezHadoopShim, attemptId);
            submissionState = executorService.schedule(callable);

            if (LOG.isInfoEnabled()) {
    - LOG.info("SubmissionState for {} : {} ", ndcContextString, submissionState);
    + LOG.info("SubmissionState for {} : {} ", fragmentIdString, submissionState);
            }

            if (submissionState.equals(Scheduler.SubmissionState.REJECTED)) {
    @@ -300,24 +303,25 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
          return sb.toString();
        }

    - public static String stringifySubmitRequest(SubmitWorkRequestProto request) {
    + public static String stringifySubmitRequest(
    + SubmitWorkRequestProto request, SignableVertexSpec vertex) {
          StringBuilder sb = new StringBuilder();
    - FragmentSpecProto fragmentSpec = request.getFragmentSpec();
          sb.append("am_details=").append(request.getAmHost()).append(":").append(request.getAmPort());
    - sb.append(", taskInfo=").append(fragmentSpec.getFragmentIdentifierString());
    - sb.append(", user=").append(request.getUser());
    - sb.append(", appIdString=").append(request.getApplicationIdString());
    - sb.append(", appAttemptNum=").append(request.getAppAttemptNumber());
    + sb.append(", taskInfo=").append(vertex.getVertexIdentifier()).append(" fragment ")
    + .append(request.getFragmentNumber()).append(" attempt ").append(request.getAttemptNumber());
    + sb.append(", user=").append(vertex.getUser());
    + sb.append(", appIdString=").append(vertex.getVertexIdentifier().getApplicationIdString());
    + sb.append(", appAttemptNum=").append(vertex.getVertexIdentifier().getAppAttemptNumber());
          sb.append(", containerIdString=").append(request.getContainerIdString());
    - sb.append(", dagName=").append(fragmentSpec.getDagName());
    - sb.append(", vertexName=").append(fragmentSpec.getVertexName());
    - sb.append(", processor=").append(fragmentSpec.getProcessorDescriptor().getClassName());
    - sb.append(", numInputs=").append(fragmentSpec.getInputSpecsCount());
    - sb.append(", numOutputs=").append(fragmentSpec.getOutputSpecsCount());
    - sb.append(", numGroupedInputs=").append(fragmentSpec.getGroupedInputSpecsCount());
    + sb.append(", dagName=").append(vertex.getDagName());
    + sb.append(", vertexName=").append(vertex.getVertexName());
    + sb.append(", processor=").append(vertex.getProcessorDescriptor().getClassName());
    + sb.append(", numInputs=").append(vertex.getInputSpecsCount());
    + sb.append(", numOutputs=").append(vertex.getOutputSpecsCount());
    + sb.append(", numGroupedInputs=").append(vertex.getGroupedInputSpecsCount());
          sb.append(", Inputs={");
    - if (fragmentSpec.getInputSpecsCount() > 0) {
    - for (IOSpecProto ioSpec : fragmentSpec.getInputSpecsList()) {
    + if (vertex.getInputSpecsCount() > 0) {
    + for (IOSpecProto ioSpec : vertex.getInputSpecsList()) {
              sb.append("{").append(ioSpec.getConnectedVertexName()).append(",")
                  .append(ioSpec.getIoDescriptor().getClassName()).append(",")
                  .append(ioSpec.getPhysicalEdgeCount()).append("}");
    @@ -325,8 +329,8 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
          }
          sb.append("}");
          sb.append(", Outputs={");
    - if (fragmentSpec.getOutputSpecsCount() > 0) {
    - for (IOSpecProto ioSpec : fragmentSpec.getOutputSpecsList()) {
    + if (vertex.getOutputSpecsCount() > 0) {
    + for (IOSpecProto ioSpec : vertex.getOutputSpecsList()) {
              sb.append("{").append(ioSpec.getConnectedVertexName()).append(",")
                  .append(ioSpec.getIoDescriptor().getClassName()).append(",")
                  .append(ioSpec.getPhysicalEdgeCount()).append("}");
    @@ -334,8 +338,8 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu
          }
          sb.append("}");
          sb.append(", GroupedInputs={");
    - if (fragmentSpec.getGroupedInputSpecsCount() > 0) {
    - for (GroupInputSpecProto group : fragmentSpec.getGroupedInputSpecsList()) {
    + if (vertex.getGroupedInputSpecsCount() > 0) {
    + for (GroupInputSpecProto group : vertex.getGroupedInputSpecsList()) {
              sb.append("{").append("groupName=").append(group.getGroupName()).append(", elements=")
                  .append(group.getGroupVerticesList()).append("}");
              sb.append(group.getGroupVerticesList());

    http://git-wip-us.apache.org/repos/asf/hive/blob/0b5c27fd/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryFragmentInfo.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryFragmentInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryFragmentInfo.java
    index 480a394..195775e 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryFragmentInfo.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryFragmentInfo.java
    @@ -21,8 +21,8 @@ import java.util.List;
      import com.google.common.base.Preconditions;
      import org.apache.hadoop.hive.llap.daemon.FinishableStateUpdateHandler;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
    -import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      import org.apache.hadoop.hive.llap.tezplugins.LlapTezUtils;
      import org.slf4j.Logger;
      import org.slf4j.LoggerFactory;
    @@ -35,19 +35,20 @@ public class QueryFragmentInfo {
        private final String vertexName;
        private final int fragmentNumber;
        private final int attemptNumber;
    - private final FragmentSpecProto fragmentSpec;
    + private final SignableVertexSpec vertexSpec;
    + private final String fragmentIdString;

        public QueryFragmentInfo(QueryInfo queryInfo, String vertexName, int fragmentNumber,
    - int attemptNumber,
    - FragmentSpecProto fragmentSpec) {
    + int attemptNumber, SignableVertexSpec vertexSpec, String fragmentIdString) {
          Preconditions.checkNotNull(queryInfo);
          Preconditions.checkNotNull(vertexName);
    - Preconditions.checkNotNull(fragmentSpec);
    + Preconditions.checkNotNull(vertexSpec);
          this.queryInfo = queryInfo;
          this.vertexName = vertexName;
          this.fragmentNumber = fragmentNumber;
          this.attemptNumber = attemptNumber;
    - this.fragmentSpec = fragmentSpec;
    + this.vertexSpec = vertexSpec;
    + this.fragmentIdString = fragmentIdString;
        }

        // Only meant for use by the QueryTracker
    @@ -55,8 +56,8 @@ public class QueryFragmentInfo {
          return this.queryInfo;
        }

    - public FragmentSpecProto getFragmentSpec() {
    - return fragmentSpec;
    + public SignableVertexSpec getVertexSpec() {
    + return vertexSpec;
        }

        public String getVertexName() {
    @@ -72,7 +73,7 @@ public class QueryFragmentInfo {
        }

        public String getFragmentIdentifierString() {
    - return fragmentSpec.getFragmentIdentifierString();
    + return fragmentIdString;
        }

        /**
    @@ -85,7 +86,7 @@ public class QueryFragmentInfo {
         * @return true if the task can finish, false otherwise
         */
        public boolean canFinish() {
    - List<IOSpecProto> inputSpecList = fragmentSpec.getInputSpecsList();
    + List<IOSpecProto> inputSpecList = vertexSpec.getInputSpecsList();
          boolean canFinish = true;
          if (inputSpecList != null && !inputSpecList.isEmpty()) {
            for (IOSpecProto inputSpec : inputSpecList) {
    @@ -126,7 +127,7 @@ public class QueryFragmentInfo {
        public boolean registerForFinishableStateUpdates(FinishableStateUpdateHandler handler,
                                                      boolean lastFinishableState) {
          List<String> sourcesOfInterest = new LinkedList<>();
    - List<IOSpecProto> inputSpecList = fragmentSpec.getInputSpecsList();
    + List<IOSpecProto> inputSpecList = vertexSpec.getInputSpecsList();
          if (inputSpecList != null && !inputSpecList.isEmpty()) {
            for (IOSpecProto inputSpec : inputSpecList) {
              if (LlapTezUtils.isSourceOfInterest(inputSpec.getIoDescriptor().getClassName())) {

    http://git-wip-us.apache.org/repos/asf/hive/blob/0b5c27fd/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
    index 8daef9e..6914134 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryInfo.java
    @@ -35,7 +35,7 @@ import com.google.common.collect.Multimap;
      import org.apache.hadoop.fs.FileSystem;
      import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hive.llap.daemon.FinishableStateUpdateHandler;
    -import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto;

      public class QueryInfo {
    @@ -92,9 +92,10 @@ public class QueryInfo {
          return sourceStateMap;
        }

    - public QueryFragmentInfo registerFragment(String vertexName, int fragmentNumber, int attemptNumber, FragmentSpecProto fragmentSpec) {
    - QueryFragmentInfo fragmentInfo = new QueryFragmentInfo(this, vertexName, fragmentNumber, attemptNumber,
    - fragmentSpec);
    + public QueryFragmentInfo registerFragment(String vertexName, int fragmentNumber,
    + int attemptNumber, SignableVertexSpec vertexSpec, String fragmentIdString) {
    + QueryFragmentInfo fragmentInfo = new QueryFragmentInfo(
    + this, vertexName, fragmentNumber, attemptNumber, vertexSpec, fragmentIdString);
          knownFragments.add(fragmentInfo);
          return fragmentInfo;
        }

    http://git-wip-us.apache.org/repos/asf/hive/blob/0b5c27fd/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
    index cb3be2b..8abd198 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/QueryTracker.java
    @@ -29,7 +29,7 @@ import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.fs.FileSystem;
      import org.apache.hadoop.hive.conf.HiveConf;
      import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
    -import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto;
      import org.apache.hadoop.hive.llap.shufflehandler.ShuffleHandler;
      import org.apache.hadoop.hive.ql.exec.ObjectCacheFactory;
    @@ -113,20 +113,11 @@ public class QueryTracker extends AbstractService {

        /**
         * Register a new fragment for a specific query
    - * @param queryIdentifier
    - * @param appIdString
    - * @param dagName
    - * @param dagIdentifier
    - * @param vertexName
    - * @param fragmentNumber
    - * @param attemptNumber
    - * @param user
    - * @throws IOException
         */
        QueryFragmentInfo registerFragment(QueryIdentifier queryIdentifier, String appIdString,
            String dagName, int dagIdentifier, String vertexName, int fragmentNumber, int attemptNumber,
    - String user, FragmentSpecProto fragmentSpec, Token<JobTokenIdentifier> appToken)
    - throws IOException {
    + String user, SignableVertexSpec vertex, Token<JobTokenIdentifier> appToken,
    + String fragmentIdString) throws IOException {
          ReadWriteLock dagLock = getDagLock(queryIdentifier);
          dagLock.readLock().lock();
          try {
    @@ -166,7 +157,8 @@ public class QueryTracker extends AbstractService {
                .registerDag(appIdString, dagIdentifier, appToken,
                    user, queryInfo.getLocalDirs());

    - return queryInfo.registerFragment(vertexName, fragmentNumber, attemptNumber, fragmentSpec);
    + return queryInfo.registerFragment(
    + vertexName, fragmentNumber, attemptNumber, vertex, fragmentIdString);
          } finally {
            dagLock.readLock().unlock();
          }

    http://git-wip-us.apache.org/repos/asf/hive/blob/0b5c27fd/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
    index 1933eb1..eac0e8f 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
    @@ -43,7 +43,7 @@ import java.util.concurrent.atomic.AtomicLong;
      import org.apache.commons.lang3.exception.ExceptionUtils;
      import org.apache.hadoop.hive.llap.daemon.FinishableStateUpdateHandler;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo;
    -import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
      import org.apache.hadoop.service.AbstractService;
      import org.apache.tez.runtime.task.EndReason;
    @@ -191,8 +191,8 @@ public class TaskExecutorService extends AbstractService implements Scheduler<Ta
            TaskWrapper task = e.getValue();
            boolean isFirst = true;
            TaskRunnerCallable c = task.getTaskRunnerCallable();
    - if (c != null && c.getRequest() != null && c.getRequest().getFragmentSpec() != null) {
    - FragmentSpecProto fs = c.getRequest().getFragmentSpec();
    + if (c != null && c.getVertexSpec() != null) {
    + SignableVertexSpec fs = c.getVertexSpec();
              value.append(isFirst ? " (" : ", ").append(fs.getDagName())
                .append("/").append(fs.getVertexName());
              isFirst = false;
    @@ -781,7 +781,7 @@ public class TaskExecutorService extends AbstractService implements Scheduler<Ta
                ", firstAttemptStartTime=" + taskRunnerCallable.getFragmentRuntimeInfo().getFirstAttemptStartTime() +
                ", dagStartTime=" + taskRunnerCallable.getFragmentRuntimeInfo().getDagStartTime() +
                ", withinDagPriority=" + taskRunnerCallable.getFragmentRuntimeInfo().getWithinDagPriority() +
    - ", vertexParallelism= " + taskRunnerCallable.getFragmentSpec().getVertexParallelism() +
    + ", vertexParallelism= " + taskRunnerCallable.getVertexSpec().getVertexParallelism() +
                ", selfAndUpstreamParallelism= " + taskRunnerCallable.getFragmentRuntimeInfo().getNumSelfAndUpstreamTasks() +
                ", selfAndUpstreamComplete= " + taskRunnerCallable.getFragmentRuntimeInfo().getNumSelfAndUpstreamCompletedTasks() +
                '}';

    http://git-wip-us.apache.org/repos/asf/hive/blob/0b5c27fd/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
    index fcfa940..3093de7 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
    @@ -33,8 +33,8 @@ import org.apache.hadoop.hive.llap.daemon.FragmentCompletionHandler;
      import org.apache.hadoop.hive.llap.daemon.HistoryLogger;
      import org.apache.hadoop.hive.llap.daemon.KilledTaskHandler;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo;
    -import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
      import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
      import org.apache.hadoop.hive.llap.protocol.LlapTaskUmbilicalProtocol;
    @@ -113,6 +113,7 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
        private final AtomicBoolean isStarted = new AtomicBoolean(false);
        private final AtomicBoolean isCompleted = new AtomicBoolean(false);
        private final AtomicBoolean killInvoked = new AtomicBoolean(false);
    + private final SignableVertexSpec vertex;

        @VisibleForTesting
        public TaskRunnerCallable(SubmitWorkRequestProto request, QueryFragmentInfo fragmentInfo,
    @@ -123,7 +124,7 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
                           ConfParams confParams, LlapDaemonExecutorMetrics metrics,
                           KilledTaskHandler killedTaskHandler,
                           FragmentCompletionHandler fragmentCompleteHandler,
    - HadoopShim tezHadoopShim) {
    + HadoopShim tezHadoopShim, TezTaskAttemptID attemptId) {
          this.request = request;
          this.fragmentInfo = fragmentInfo;
          this.conf = conf;
    @@ -134,17 +135,20 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
          this.memoryAvailable = memoryAvailable;
          this.confParams = confParams;
          this.jobToken = TokenCache.getSessionToken(credentials);
    - this.taskSpec = Converters.getTaskSpecfromProto(request.getFragmentSpec());
    + // TODO: support binary spec here or above
    + this.vertex = request.getWorkSpec().getVertex();
    + this.taskSpec = Converters.getTaskSpecfromProto(
    + vertex, request.getFragmentNumber(), request.getAttemptNumber(), attemptId);
          this.amReporter = amReporter;
          // Register with the AMReporter when the callable is setup. Unregister once it starts running.
          if (jobToken != null) {
          this.amReporter.registerTask(request.getAmHost(), request.getAmPort(),
    - request.getUser(), jobToken, fragmentInfo.getQueryInfo().getQueryIdentifier());
    + vertex.getUser(), jobToken, fragmentInfo.getQueryInfo().getQueryIdentifier());
          }
          this.metrics = metrics;
    - this.requestId = request.getFragmentSpec().getFragmentIdentifierString();
    + this.requestId = taskSpec.getTaskAttemptID().toString();
          // TODO Change this to the queryId/Name when that's available.
    - this.queryId = request.getFragmentSpec().getDagName();
    + this.queryId = vertex.getDagName();
          this.killedTaskHandler = killedTaskHandler;
          this.fragmentCompletionHanler = fragmentCompleteHandler;
          this.tezHadoopShim = tezHadoopShim;
    @@ -184,16 +188,16 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {

          // TODO Consolidate this code with TezChild.
          runtimeWatch.start();
    - UserGroupInformation taskUgi = UserGroupInformation.createRemoteUser(request.getUser());
    + UserGroupInformation taskUgi = UserGroupInformation.createRemoteUser(vertex.getUser());
          taskUgi.addCredentials(credentials);

          Map<String, ByteBuffer> serviceConsumerMetadata = new HashMap<>();
          serviceConsumerMetadata.put(TezConstants.TEZ_SHUFFLE_HANDLER_SERVICE_ID,
              TezCommonUtils.convertJobTokenToBytes(jobToken));
    - Multimap<String, String> startedInputsMap = createStartedInputMap(request.getFragmentSpec());
    + Multimap<String, String> startedInputsMap = createStartedInputMap(vertex);

          UserGroupInformation taskOwner =
    - UserGroupInformation.createRemoteUser(request.getTokenIdentifier());
    + UserGroupInformation.createRemoteUser(vertex.getTokenIdentifier());
          final InetSocketAddress address =
              NetUtils.createSocketAddrForHost(request.getAmHost(), request.getAmPort());
          SecurityUtil.setTokenService(jobToken, address);
    @@ -228,7 +232,7 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
              if (shouldRunTask) {
                taskRunner = new TezTaskRunner2(conf, taskUgi, fragmentInfo.getLocalDirs(),
                    taskSpec,
    - request.getAppAttemptNumber(),
    + vertex.getVertexIdentifier().getAppAttemptNumber(),
                    serviceConsumerMetadata, envMap, startedInputsMap, taskReporter, executor,
                    objectRegistry,
                    pid,
    @@ -313,7 +317,7 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
         */
        public void reportTaskKilled() {
          killedTaskHandler
    - .taskKilled(request.getAmHost(), request.getAmPort(), request.getUser(), jobToken,
    + .taskKilled(request.getAmHost(), request.getAmPort(), vertex.getUser(), jobToken,
                  fragmentInfo.getQueryInfo().getQueryIdentifier(), taskSpec.getTaskAttemptID());
        }

    @@ -321,15 +325,15 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
          return fragmentInfo.canFinish();
        }

    - private Multimap<String, String> createStartedInputMap(FragmentSpecProto fragmentSpec) {
    + private static Multimap<String, String> createStartedInputMap(SignableVertexSpec vertex) {
          Multimap<String, String> startedInputMap = HashMultimap.create();
          // Let the Processor control start for Broadcast inputs.

          // TODO For now, this affects non broadcast unsorted cases as well. Make use of the edge
          // property when it's available.
    - for (IOSpecProto inputSpec : fragmentSpec.getInputSpecsList()) {
    + for (IOSpecProto inputSpec : vertex.getInputSpecsList()) {
            if (inputSpec.getIoDescriptor().getClassName().equals(UnorderedKVInput.class.getName())) {
    - startedInputMap.put(fragmentSpec.getVertexName(), inputSpec.getConnectedVertexName());
    + startedInputMap.put(vertex.getVertexName(), inputSpec.getConnectedVertexName());
            }
          }
          return startedInputMap;
    @@ -350,7 +354,7 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
        @Override
        public String toString() {
          return requestId + " {canFinish: " + canFinish() +
    - ", vertexParallelism: " + request.getFragmentSpec().getVertexParallelism() +
    + ", vertexParallelism: " + vertex.getVertexParallelism() +
              ", selfAndUpstreamParallelism: " + request.getFragmentRuntimeInfo().getNumSelfAndUpstreamTasks() +
              ", selfAndUpstreamComplete: " + request.getFragmentRuntimeInfo().getNumSelfAndUpstreamCompletedTasks() +
              ", firstAttemptStartTime: " + getFragmentRuntimeInfo().getFirstAttemptStartTime() +
    @@ -454,14 +458,7 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
            fragmentCompletionHanler.fragmentComplete(fragmentInfo);

            taskRunnerCallable.shutdown();
    - HistoryLogger
    - .logFragmentEnd(request.getApplicationIdString(), request.getContainerIdString(),
    - executionContext.getHostName(), request.getFragmentSpec().getDagName(),
    - fragmentInfo.getQueryInfo().getDagIdentifier(),
    - request.getFragmentSpec().getVertexName(),
    - request.getFragmentSpec().getFragmentNumber(),
    - request.getFragmentSpec().getAttemptNumber(), taskRunnerCallable.threadName,
    - taskRunnerCallable.startTime, true);
    + logFragmentEnd(true);
          }

          @Override
    @@ -471,14 +468,15 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
            fragmentCompletionHanler.fragmentComplete(fragmentInfo);
            // TODO HIVE-10236 Report a fatal error over the umbilical
            taskRunnerCallable.shutdown();
    - HistoryLogger
    - .logFragmentEnd(request.getApplicationIdString(), request.getContainerIdString(),
    - executionContext.getHostName(), request.getFragmentSpec().getDagName(),
    - fragmentInfo.getQueryInfo().getDagIdentifier(),
    - request.getFragmentSpec().getVertexName(),
    - request.getFragmentSpec().getFragmentNumber(),
    - request.getFragmentSpec().getAttemptNumber(), taskRunnerCallable.threadName,
    - taskRunnerCallable.startTime, false);
    + logFragmentEnd(false);
    + }
    +
    + protected void logFragmentEnd(boolean success) {
    + HistoryLogger.logFragmentEnd(vertex.getVertexIdentifier().getApplicationIdString(),
    + request.getContainerIdString(), executionContext.getHostName(), vertex.getDagName(),
    + fragmentInfo.getQueryInfo().getDagIdentifier(), vertex.getVertexName(),
    + request.getFragmentNumber(), request.getAttemptNumber(), taskRunnerCallable.threadName,
    + taskRunnerCallable.startTime, success);
          }
        }

    @@ -498,12 +496,14 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
        public static String getTaskIdentifierString(
            SubmitWorkRequestProto request) {
          StringBuilder sb = new StringBuilder();
    - sb.append("AppId=").append(request.getApplicationIdString())
    + // TODO: also support the binary version
    + SignableVertexSpec vertex = request.getWorkSpec().getVertex();
    + sb.append("AppId=").append(vertex.getVertexIdentifier().getApplicationIdString())
              .append(", containerId=").append(request.getContainerIdString())
    - .append(", Dag=").append(request.getFragmentSpec().getDagName())
    - .append(", Vertex=").append(request.getFragmentSpec().getVertexName())
    - .append(", FragmentNum=").append(request.getFragmentSpec().getFragmentNumber())
    - .append(", Attempt=").append(request.getFragmentSpec().getAttemptNumber());
    + .append(", Dag=").append(vertex.getDagName())
    + .append(", Vertex=").append(vertex.getVertexName())
    + .append(", FragmentNum=").append(request.getFragmentNumber())
    + .append(", Attempt=").append(request.getAttemptNumber());
          return sb.toString();
        }

    @@ -511,7 +511,8 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
          return request.getFragmentRuntimeInfo();
        }

    - public FragmentSpecProto getFragmentSpec() {
    - return request.getFragmentSpec();
    + public SignableVertexSpec getVertexSpec() {
    + // TODO: support for binary spec? presumably we'd parse it somewhere earlier
    + return vertex;
        }
      }
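
    For reference, a minimal consolidated sketch of the accessor pattern after this change
    (not part of the commit; the class and method names below are illustrative, but every
    getter is one of the generated protobuf accessors visible in the hunks above): vertex-scoped
    fields now come from the SignableVertexSpec obtained via request.getWorkSpec().getVertex(),
    while fragment and attempt numbers stay on the SubmitWorkRequestProto itself.

      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;

      final class FragmentDescriber {
        // Mirrors getTaskIdentifierString() as rewritten above: application id, DAG name and
        // vertex name are read from the vertex spec; fragment and attempt numbers from the request.
        static String describe(SubmitWorkRequestProto request) {
          SignableVertexSpec vertex = request.getWorkSpec().getVertex();
          return "AppId=" + vertex.getVertexIdentifier().getApplicationIdString()
              + ", containerId=" + request.getContainerIdString()
              + ", Dag=" + vertex.getDagName()
              + ", Vertex=" + vertex.getVertexName()
              + ", FragmentNum=" + request.getFragmentNumber()
              + ", Attempt=" + request.getAttemptNumber();
        }
      }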

    http://git-wip-us.apache.org/repos/asf/hive/blob/0b5c27fd/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java
    index c6ba14e..d699f20 100644
    --- a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java
    +++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorTestHelpers.java
    @@ -26,9 +26,11 @@ import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.hive.llap.daemon.FragmentCompletionHandler;
      import org.apache.hadoop.hive.llap.daemon.KilledTaskHandler;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
    -import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary;
      import org.apache.hadoop.hive.llap.metrics.LlapDaemonExecutorMetrics;
    +import org.apache.hadoop.hive.llap.tez.Converters;
      import org.apache.hadoop.security.Credentials;
      import org.apache.hadoop.yarn.api.records.ApplicationId;
      import org.apache.tez.dag.records.TezDAGID;
    @@ -51,26 +53,25 @@ public class TaskExecutorTestHelpers {
          SubmitWorkRequestProto
              requestProto = createSubmitWorkRequestProto(fragmentNum, parallelism,
              startTime);
    - QueryFragmentInfo queryFragmentInfo = createQueryFragmentInfo(requestProto.getFragmentSpec());
    + QueryFragmentInfo queryFragmentInfo = createQueryFragmentInfo(
    + requestProto.getWorkSpec().getVertex(), requestProto.getFragmentNumber());
          MockRequest mockRequest = new MockRequest(requestProto, queryFragmentInfo, canFinish, workTime);
          return mockRequest;
        }

        public static TaskExecutorService.TaskWrapper createTaskWrapper(
            SubmitWorkRequestProto request, boolean canFinish, int workTime) {
    - QueryFragmentInfo queryFragmentInfo = createQueryFragmentInfo(request.getFragmentSpec());
    + QueryFragmentInfo queryFragmentInfo = createQueryFragmentInfo(
    + request.getWorkSpec().getVertex(), request.getFragmentNumber());
          MockRequest mockRequest = new MockRequest(request, queryFragmentInfo, canFinish, workTime);
          TaskExecutorService.TaskWrapper
              taskWrapper = new TaskExecutorService.TaskWrapper(mockRequest, null);
          return taskWrapper;
        }

    - public static QueryFragmentInfo createQueryFragmentInfo(FragmentSpecProto fragmentSpecProto) {
    - QueryInfo queryInfo = createQueryInfo();
    - QueryFragmentInfo fragmentInfo =
    - new QueryFragmentInfo(queryInfo, "fakeVertexName", fragmentSpecProto.getFragmentNumber(), 0,
    - fragmentSpecProto);
    - return fragmentInfo;
    + public static QueryFragmentInfo createQueryFragmentInfo(
    + SignableVertexSpec vertex, int fragmentNum) {
    + return new QueryFragmentInfo(createQueryInfo(), "fakeVertexName", fragmentNum, 0, vertex, "");
        }

        public static QueryInfo createQueryInfo() {
    @@ -100,20 +101,23 @@ public class TaskExecutorTestHelpers {
          TezTaskAttemptID taId = TezTaskAttemptID.getInstance(tId, fragmentNumber);
          return SubmitWorkRequestProto
              .newBuilder()
    - .setFragmentSpec(
    - FragmentSpecProto
    - .newBuilder()
    - .setAttemptNumber(0)
    + .setAttemptNumber(0)
    + .setFragmentNumber(fragmentNumber)
    + .setWorkSpec(
    + VertexOrBinary.newBuilder().setVertex(
    + SignableVertexSpec.newBuilder()
                      .setDagName("MockDag")
    - .setFragmentNumber(fragmentNumber)
    + .setUser("MockUser")
    + .setTokenIdentifier("MockToken_1")
    + .setVertexIdentifier(Converters.createVertexIdentifier(taId, 0))
                      .setVertexName("MockVertex")
                      .setProcessorDescriptor(
                          LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder()
                              .setClassName("MockProcessor").build())
    - .setFragmentIdentifierString(taId.toString()).build()).setAmHost("localhost")
    - .setAmPort(12345).setAppAttemptNumber(0).setApplicationIdString("MockApp_1")
    - .setContainerIdString("MockContainer_1").setUser("MockUser")
    - .setTokenIdentifier("MockToken_1")
    + .build()).build())
    + .setAmHost("localhost")
    + .setAmPort(12345)
    + .setContainerIdString("MockContainer_1")
              .setFragmentRuntimeInfo(LlapDaemonProtocolProtos
                  .FragmentRuntimeInfo
                  .newBuilder()
    @@ -146,7 +150,7 @@ public class TaskExecutorTestHelpers {
                new ExecutionContextImpl("localhost"), null, new Credentials(), 0, null, null, mock(
                    LlapDaemonExecutorMetrics.class),
                mock(KilledTaskHandler.class), mock(
    - FragmentCompletionHandler.class), new DefaultHadoopShim());
    + FragmentCompletionHandler.class), new DefaultHadoopShim(), null);
            this.workTime = workTime;
            this.canFinish = canFinish;
          }
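
    Applied cleanly, the rewritten mock builder in createSubmitWorkRequestProto reads roughly as
    below (a consolidated sketch for readability only; fragmentNumber, taId and the unchanged
    FragmentRuntimeInfo block are the same locals as in the hunk above, and every mock constant
    is taken from it).

      SubmitWorkRequestProto request = SubmitWorkRequestProto.newBuilder()
          .setAttemptNumber(0)
          .setFragmentNumber(fragmentNumber)
          .setWorkSpec(VertexOrBinary.newBuilder()
              .setVertex(SignableVertexSpec.newBuilder()
                  .setVertexIdentifier(Converters.createVertexIdentifier(taId, 0))
                  .setDagName("MockDag")
                  .setVertexName("MockVertex")
                  .setUser("MockUser")
                  .setTokenIdentifier("MockToken_1")
                  .setProcessorDescriptor(
                      LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder()
                          .setClassName("MockProcessor").build())
                  .build())
              .build())
          .setAmHost("localhost")
          .setAmPort(12345)
          .setContainerIdString("MockContainer_1")
          // ... FragmentRuntimeInfo is set exactly as before ...
          .build();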

    http://git-wip-us.apache.org/repos/asf/hive/blob/0b5c27fd/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestFirstInFirstOutComparator.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestFirstInFirstOutComparator.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestFirstInFirstOutComparator.java
    index 08ee769..a250882 100644
    --- a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestFirstInFirstOutComparator.java
    +++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestFirstInFirstOutComparator.java
    @@ -31,8 +31,11 @@ import org.apache.hadoop.hive.llap.daemon.impl.TaskExecutorService.TaskWrapper;
      import org.apache.hadoop.hive.llap.daemon.impl.TaskRunnerCallable;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto;
    -import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary;
    +import org.apache.hadoop.hive.llap.tez.Converters;
      import org.apache.hadoop.security.Credentials;
      import org.apache.hadoop.yarn.api.records.ApplicationId;
      import org.apache.tez.dag.records.TezDAGID;
    @@ -59,7 +62,7 @@ public class TestFirstInFirstOutComparator {
            super(requestProto, mock(QueryFragmentInfo.class), conf,
                new ExecutionContextImpl("localhost"), null, cred, 0, null, null, null,
                mock(KilledTaskHandler.class), mock(
    - FragmentCompletionHandler.class), new DefaultHadoopShim());
    + FragmentCompletionHandler.class), new DefaultHadoopShim(), null);
            this.workTime = workTime;
            this.canFinish = canFinish;
          }
    @@ -102,19 +105,23 @@ public class TestFirstInFirstOutComparator {
          TezTaskAttemptID taId = TezTaskAttemptID.getInstance(tId, fragmentNumber);
          return SubmitWorkRequestProto
              .newBuilder()
    - .setFragmentSpec(
    - FragmentSpecProto
    + .setAttemptNumber(0)
    + .setFragmentNumber(fragmentNumber)
    + .setWorkSpec(
    + VertexOrBinary.newBuilder().setVertex(
    + SignableVertexSpec
                      .newBuilder()
    - .setAttemptNumber(0)
    + .setVertexIdentifier(Converters.createVertexIdentifier(taId, 0))
                      .setDagName("MockDag")
    - .setFragmentNumber(fragmentNumber)
                      .setVertexName("MockVertex")
    + .setUser("MockUser")
    + .setTokenIdentifier("MockToken_1")
                      .setProcessorDescriptor(
                          EntityDescriptorProto.newBuilder().setClassName("MockProcessor").build())
    - .setFragmentIdentifierString(taId.toString()).build()).setAmHost("localhost")
    - .setAmPort(12345).setAppAttemptNumber(0).setApplicationIdString("MockApp_1")
    - .setContainerIdString("MockContainer_1").setUser("MockUser")
    - .setTokenIdentifier("MockToken_1")
    + .build()).build())
    + .setAmHost("localhost")
    + .setAmPort(12345)
    + .setContainerIdString("MockContainer_1")
              .setFragmentRuntimeInfo(LlapDaemonProtocolProtos
                  .FragmentRuntimeInfo
                  .newBuilder()

    http://git-wip-us.apache.org/repos/asf/hive/blob/0b5c27fd/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
    ----------------------------------------------------------------------
    diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
    index b4b041a..a3f2eb8 100644
    --- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
    +++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
    @@ -46,6 +46,7 @@ import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWor
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto;
    +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary;
      import org.apache.hadoop.hive.llap.protocol.LlapTaskUmbilicalProtocol;
      import org.apache.hadoop.hive.llap.registry.ServiceInstance;
      import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
    @@ -89,10 +90,7 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
        private static final Logger LOG = LoggerFactory.getLogger(LlapTaskCommunicator.class);

        private static final boolean isInfoEnabled = LOG.isInfoEnabled();
    - private static final boolean isDebugEnabed = LOG.isDebugEnabled();
    -
    - private final SubmitWorkRequestProto BASE_SUBMIT_WORK_REQUEST;
    -
    +
        private final ConcurrentMap<QueryIdentifierProto, ByteBuffer> credentialMap;

        // Tracks containerIds and taskAttemptIds, so can be kept independent of the running DAG.
    @@ -105,6 +103,8 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
        private long deleteDelayOnDagComplete;
        private final LlapTaskUmbilicalProtocol umbilical;
        private final Token<LlapTokenIdentifier> token;
    + private final int appAttemptId;
    + private final String user;

        // These two structures track the list of known nodes, and the list of nodes which are sending in keep-alive heartbeats.
        // Primarily for debugging purposes a.t.m, since there's some unexplained TASK_TIMEOUTS which are currently being observed.
    @@ -113,8 +113,6 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {

        private final LlapRegistryService serviceRegistry;

    -
    - private volatile int currentDagId;
        private volatile QueryIdentifierProto currentQueryIdentifierProto;

        public LlapTaskCommunicator(
    @@ -138,17 +136,10 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
          serviceRegistry = LlapRegistryService.getClient(conf);

          umbilical = new LlapTaskUmbilicalProtocolImpl(getUmbilical());
    - SubmitWorkRequestProto.Builder baseBuilder = SubmitWorkRequestProto.newBuilder();

          // TODO Avoid reading this from the environment
    - baseBuilder.setUser(System.getenv(ApplicationConstants.Environment.USER.name()));
    - baseBuilder.setApplicationIdString(
    - taskCommunicatorContext.getApplicationAttemptId().getApplicationId().toString());
    - baseBuilder
    - .setAppAttemptNumber(taskCommunicatorContext.getApplicationAttemptId().getAttemptId());
    - baseBuilder.setTokenIdentifier(getTokenIdentifier());
    -
    - BASE_SUBMIT_WORK_REQUEST = baseBuilder.build();
    + user = System.getenv(ApplicationConstants.Environment.USER.name());
    + appAttemptId = taskCommunicatorContext.getApplicationAttemptId().getAttemptId();

          credentialMap = new ConcurrentHashMap<>();
          sourceStateTracker = new SourceStateTracker(getContext(), this);
    @@ -316,7 +307,6 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
                    t = se.getCause();
                  }
                  if (t instanceof RemoteException) {
    - RemoteException re = (RemoteException) t;
                    // All others from the remote service cause the task to FAIL.
                    LOG.info(
                        "Failed to run task: " + taskSpec.getTaskAttemptID() + " on containerId: " +
    @@ -591,8 +581,9 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
                                                                  TaskSpec taskSpec,
                                                                  FragmentRuntimeInfo fragmentRuntimeInfo) throws
            IOException {
    - SubmitWorkRequestProto.Builder builder =
    - SubmitWorkRequestProto.newBuilder(BASE_SUBMIT_WORK_REQUEST);
    + SubmitWorkRequestProto.Builder builder = SubmitWorkRequestProto.newBuilder();
    + builder.setFragmentNumber(taskSpec.getTaskAttemptID().getTaskID().getId());
    + builder.setAttemptNumber(taskSpec.getTaskAttemptID().getId());
          builder.setContainerIdString(containerId.toString());
          builder.setAmHost(getAddress().getHostName());
          builder.setAmPort(getAddress().getPort());
    @@ -607,7 +598,9 @@ public class LlapTaskCommunicator extends TezTaskCommunicatorImpl {
            credentialsBinary = credentialsBinary.duplicate();
          }
          builder.setCredentialsBinary(ByteString.copyFrom(credentialsBinary));
    - builder.setFragmentSpec(Converters.convertTaskSpecToProto(taskSpec));
    + builder.setWorkSpec(VertexOrBinary.newBuilder().setVertex(Converters.convertTaskSpecToProto(
    + taskSpec, appAttemptId, getTokenIdentifier(), null, user)).build());
    + // Don't call builder.setWorkSpecSignature() - Tez doesn't sign fragments
          builder.setFragmentRuntimeInfo(fragmentRuntimeInfo);
          return builder.build();
        }
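
    Net effect of the hunks above: the prebuilt BASE_SUBMIT_WORK_REQUEST is gone, and the identity
    fields it used to carry (user, application id, attempt number, token identifier) now travel
    inside the SignableVertexSpec produced by Converters.convertTaskSpecToProto. A rough sketch of
    the per-fragment construction after the change, using only the calls shown in the diff
    (containerId, taskSpec, credentialsBinary and fragmentRuntimeInfo are the method's existing
    parameters and locals):

      SubmitWorkRequestProto.Builder builder = SubmitWorkRequestProto.newBuilder();
      builder.setFragmentNumber(taskSpec.getTaskAttemptID().getTaskID().getId());
      builder.setAttemptNumber(taskSpec.getTaskAttemptID().getId());
      builder.setContainerIdString(containerId.toString());
      builder.setAmHost(getAddress().getHostName());
      builder.setAmPort(getAddress().getPort());
      builder.setCredentialsBinary(ByteString.copyFrom(credentialsBinary));
      // user, appAttemptId and the token identifier are folded into the vertex spec;
      // no signature is attached because Tez does not sign fragments.
      builder.setWorkSpec(VertexOrBinary.newBuilder()
          .setVertex(Converters.convertTaskSpecToProto(
              taskSpec, appAttemptId, getTokenIdentifier(), null, user))
          .build());
      builder.setFragmentRuntimeInfo(fragmentRuntimeInfo);
      SubmitWorkRequestProto request = builder.build();
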
  • Spena at May 6, 2016 at 8:42 pm
    HIVE-13683 Remove erroneously included patch file (Alan Gates)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/70fe3108
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/70fe3108
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/70fe3108

    Branch: refs/heads/java8
    Commit: 70fe31088639ebfdd114e026d8a332540dfbe3b2
    Parents: 0b5c27f
    Author: Alan Gates <gates@hortonworks.com>
    Authored: Tue May 3 15:51:44 2016 -0700
    Committer: Alan Gates <gates@hortonworks.com>
    Committed: Tue May 3 15:53:19 2016 -0700

    ----------------------------------------------------------------------
      HIVE-13509.2.patch | 478 ------------------------------------------------
      1 file changed, 478 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/70fe3108/HIVE-13509.2.patch
    ----------------------------------------------------------------------
    diff --git a/HIVE-13509.2.patch b/HIVE-13509.2.patch
    deleted file mode 100644
    index 930b1f7..0000000
    --- a/HIVE-13509.2.patch
    +++ /dev/null
    @@ -1,478 +0,0 @@
    -diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java
    -index 6b03fcb..d165e7e 100644
    ---- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java
    -+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java
    -@@ -208,4 +208,7 @@ private HCatConstants() { // restrict instantiation
    - */
    - public static final String HCAT_INPUT_BAD_RECORD_MIN_KEY = "hcat.input.bad.record.min";
    - public static final int HCAT_INPUT_BAD_RECORD_MIN_DEFAULT = 2;
    -+
    -+ public static final String HCAT_INPUT_IGNORE_INVALID_PATH_KEY = "hcat.input.ignore.invalid.path";
    -+ public static final boolean HCAT_INPUT_IGNORE_INVALID_PATH_DEFAULT = false;
    - }
    -diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatBaseInputFormat.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatBaseInputFormat.java
    -index adfaf4e..dbbdd61 100644
    ---- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatBaseInputFormat.java
    -+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatBaseInputFormat.java
    -@@ -21,11 +21,11 @@
    -
    - import java.io.IOException;
    - import java.util.ArrayList;
    -+import java.util.Iterator;
    - import java.util.LinkedList;
    - import java.util.Map;
    - import java.util.HashMap;
    - import java.util.List;
    --
    - import org.apache.hadoop.conf.Configuration;
    - import org.apache.hadoop.fs.FileSystem;
    - import org.apache.hadoop.fs.Path;
    -@@ -127,7 +127,10 @@ public static void setOutputSchema(Job job, HCatSchema hcatSchema)
    - //For each matching partition, call getSplits on the underlying InputFormat
    - for (PartInfo partitionInfo : partitionInfoList) {
    - jobConf = HCatUtil.getJobConfFromContext(jobContext);
    -- setInputPath(jobConf, partitionInfo.getLocation());
    -+ List<String> setInputPath = setInputPath(jobConf, partitionInfo.getLocation());
    -+ if (setInputPath.isEmpty()) {
    -+ continue;
    -+ }
    - Map<String, String> jobProperties = partitionInfo.getJobProperties();
    -
    - HCatUtil.copyJobPropertiesToJobConf(jobProperties, jobConf);
    -@@ -281,7 +284,7 @@ private static InputJobInfo getJobInfo(Configuration conf)
    - return (InputJobInfo) HCatUtil.deserialize(jobString);
    - }
    -
    -- private void setInputPath(JobConf jobConf, String location)
    -+ private List<String> setInputPath(JobConf jobConf, String location)
    - throws IOException {
    -
    - // ideally we should just call FileInputFormat.setInputPaths() here - but
    -@@ -322,19 +325,33 @@ private void setInputPath(JobConf jobConf, String location)
    - }
    - pathStrings.add(location.substring(pathStart, length));
    -
    -- Path[] paths = StringUtils.stringToPath(pathStrings.toArray(new String[0]));
    - String separator = "";
    - StringBuilder str = new StringBuilder();
    -
    -- for (Path path : paths) {
    -+ boolean ignoreInvalidPath =jobConf.getBoolean(HCatConstants.HCAT_INPUT_IGNORE_INVALID_PATH_KEY,
    -+ HCatConstants.HCAT_INPUT_IGNORE_INVALID_PATH_DEFAULT);
    -+ Iterator<String> pathIterator = pathStrings.iterator();
    -+ while (pathIterator.hasNext()) {
    -+ String pathString = pathIterator.next();
    -+ if (ignoreInvalidPath && org.apache.commons.lang.StringUtils.isBlank(pathString)) {
    -+ continue;
    -+ }
    -+ Path path = new Path(pathString);
    - FileSystem fs = path.getFileSystem(jobConf);
    -+ if (ignoreInvalidPath && !fs.exists(path)) {
    -+ pathIterator.remove();
    -+ continue;
    -+ }
    - final String qualifiedPath = fs.makeQualified(path).toString();
    - str.append(separator)
    - .append(StringUtils.escapeString(qualifiedPath));
    - separator = StringUtils.COMMA_STR;
    - }
    -
    -- jobConf.set("mapred.input.dir", str.toString());
    -+ if (!ignoreInvalidPath || !pathStrings.isEmpty()) {
    -+ jobConf.set("mapred.input.dir", str.toString());
    -+ }
    -+ return pathStrings;
    - }
    -
    - }
    -diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoader.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoader.java
    -index 2440cb5..4e23fa2 100644
    ---- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoader.java
    -+++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoader.java
    -@@ -66,6 +66,7 @@
    - import org.apache.pig.data.Tuple;
    - import org.apache.pig.impl.logicalLayer.schema.Schema;
    - import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
    -+import org.apache.pig.impl.util.PropertiesUtil;
    - import org.joda.time.DateTime;
    - import org.junit.After;
    - import org.junit.Before;
    -@@ -102,6 +103,7 @@
    - add("testReadPartitionedBasic");
    - add("testProjectionsBasic");
    - add("testColumnarStorePushdown2");
    -+ add("testReadMissingPartitionBasicNeg");
    - }});
    - }};
    -
    -@@ -438,6 +440,59 @@ public void testReadPartitionedBasic() throws IOException, CommandNeedRetryExcep
    - }
    -
    - @Test
    -+ public void testReadMissingPartitionBasicNeg() throws IOException, CommandNeedRetryException {
    -+ assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS));
    -+ PigServer server = new PigServer(ExecType.LOCAL);
    -+
    -+ File removedPartitionDir = new File(TEST_WAREHOUSE_DIR + "/" + PARTITIONED_TABLE + "/bkt=0");
    -+ if (!removeDirectory(removedPartitionDir)) {
    -+ System.out.println("Test did not run because its environment could not be set.");
    -+ return;
    -+ }
    -+ driver.run("select * from " + PARTITIONED_TABLE);
    -+ ArrayList<String> valuesReadFromHiveDriver = new ArrayList<String>();
    -+ driver.getResults(valuesReadFromHiveDriver);
    -+ assertTrue(valuesReadFromHiveDriver.size() == 6);
    -+
    -+ server.registerQuery("W = load '" + PARTITIONED_TABLE + "' using org.apache.hive.hcatalog.pig.HCatLoader();");
    -+ Schema dumpedWSchema = server.dumpSchema("W");
    -+ List<FieldSchema> Wfields = dumpedWSchema.getFields();
    -+ assertEquals(3, Wfields.size());
    -+ assertTrue(Wfields.get(0).alias.equalsIgnoreCase("a"));
    -+ assertTrue(Wfields.get(0).type == DataType.INTEGER);
    -+ assertTrue(Wfields.get(1).alias.equalsIgnoreCase("b"));
    -+ assertTrue(Wfields.get(1).type == DataType.CHARARRAY);
    -+ assertTrue(Wfields.get(2).alias.equalsIgnoreCase("bkt"));
    -+ assertTrue(Wfields.get(2).type == DataType.CHARARRAY);
    -+
    -+ try {
    -+ Iterator<Tuple> WIter = server.openIterator("W");
    -+ fail("Should failed in retriving an invalid partition");
    -+ } catch (IOException ioe) {
    -+ // expected
    -+ }
    -+ }
    -+
    -+ private static boolean removeDirectory(File dir) {
    -+ boolean success = false;
    -+ if (dir.isDirectory()) {
    -+ File[] files = dir.listFiles();
    -+ if (files != null && files.length > 0) {
    -+ for (File file : files) {
    -+ success = removeDirectory(file);
    -+ if (!success) {
    -+ return false;
    -+ }
    -+ }
    -+ }
    -+ success = dir.delete();
    -+ } else {
    -+ success = dir.delete();
    -+ }
    -+ return success;
    -+ }
    -+
    -+ @Test
    - public void testProjectionsBasic() throws IOException {
    - assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS));
    -
    -diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderWithProps.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderWithProps.java
    -new file mode 100644
    -index 0000000..41fe79b
    ---- /dev/null
    -+++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderWithProps.java
    -@@ -0,0 +1,305 @@
    -+/**
    -+ * Licensed to the Apache Software Foundation (ASF) under one
    -+ * or more contributor license agreements. See the NOTICE file
    -+ * distributed with this work for additional information
    -+ * regarding copyright ownership. The ASF licenses this file
    -+ * to you under the Apache License, Version 2.0 (the
    -+ * "License"); you may not use this file except in compliance
    -+ * with the License. You may obtain a copy of the License at
    -+ *
    -+ * http://www.apache.org/licenses/LICENSE-2.0
    -+ *
    -+ * Unless required by applicable law or agreed to in writing,
    -+ * software distributed under the License is distributed on an
    -+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    -+ * KIND, either express or implied. See the License for the
    -+ * specific language governing permissions and limitations
    -+ * under the License.
    -+ */
    -+package org.apache.hive.hcatalog.pig;
    -+
    -+import java.io.File;
    -+import java.io.FileWriter;
    -+import java.io.IOException;
    -+import java.io.PrintWriter;
    -+import java.io.RandomAccessFile;
    -+import java.sql.Date;
    -+import java.sql.Timestamp;
    -+import java.util.ArrayList;
    -+import java.util.Collection;
    -+import java.util.HashMap;
    -+import java.util.HashSet;
    -+import java.util.Iterator;
    -+import java.util.List;
    -+import java.util.Map;
    -+import java.util.Properties;
    -+import java.util.Set;
    -+
    -+import org.apache.commons.io.FileUtils;
    -+import org.apache.hadoop.fs.FileSystem;
    -+import org.apache.hadoop.fs.FileUtil;
    -+import org.apache.hadoop.fs.Path;
    -+import org.apache.hadoop.hive.cli.CliSessionState;
    -+import org.apache.hadoop.hive.conf.HiveConf;
    -+import org.apache.hadoop.hive.ql.CommandNeedRetryException;
    -+import org.apache.hadoop.hive.ql.Driver;
    -+import org.apache.hadoop.hive.ql.WindowsPathUtil;
    -+import org.apache.hadoop.hive.ql.io.IOConstants;
    -+import org.apache.hadoop.hive.ql.io.StorageFormats;
    -+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
    -+import org.apache.hadoop.hive.ql.session.SessionState;
    -+import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
    -+import org.apache.hadoop.mapreduce.Job;
    -+import org.apache.hadoop.util.Shell;
    -+import org.apache.hive.hcatalog.HcatTestUtils;
    -+import org.apache.hive.hcatalog.common.HCatUtil;
    -+import org.apache.hive.hcatalog.common.HCatConstants;
    -+import org.apache.hive.hcatalog.data.Pair;
    -+import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
    -+import org.apache.pig.ExecType;
    -+import org.apache.pig.PigRunner;
    -+import org.apache.pig.PigServer;
    -+import org.apache.pig.ResourceStatistics;
    -+import org.apache.pig.tools.pigstats.OutputStats;
    -+import org.apache.pig.tools.pigstats.PigStats;
    -+import org.apache.pig.data.DataType;
    -+import org.apache.pig.data.Tuple;
    -+import org.apache.pig.impl.logicalLayer.schema.Schema;
    -+import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
    -+import org.apache.pig.impl.util.PropertiesUtil;
    -+import org.joda.time.DateTime;
    -+import org.junit.After;
    -+import org.junit.Before;
    -+import org.junit.Test;
    -+import org.junit.runner.RunWith;
    -+import org.junit.runners.Parameterized;
    -+import org.slf4j.Logger;
    -+import org.slf4j.LoggerFactory;
    -+
    -+import static org.junit.Assert.*;
    -+import static org.junit.Assume.assumeTrue;
    -+
    -+@RunWith(Parameterized.class)
    -+public class TestHCatLoaderWithProps {
    -+ private static final Logger LOG = LoggerFactory.getLogger(TestHCatLoaderWithProps.class);
    -+ private static final String TEST_DATA_DIR = HCatUtil.makePathASafeFileName(System.getProperty("java.io.tmpdir") +
    -+ File.separator + TestHCatLoaderWithProps.class.getCanonicalName() + "-" + System.currentTimeMillis());
    -+ private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
    -+ private static final String BASIC_FILE_NAME = TEST_DATA_DIR + "/basic.input.data";
    -+
    -+ private static final String BASIC_TABLE = "junit_unparted_basic";
    -+ private static final String PARTITIONED_TABLE = "junit_parted_basic";
    -+
    -+ private Driver driver;
    -+ private Map<Integer, Pair<Integer, String>> basicInputData;
    -+
    -+ private static final Map<String, Set<String>> DISABLED_STORAGE_FORMATS =
    -+ new HashMap<String, Set<String>>() {{
    -+ put(IOConstants.PARQUETFILE, new HashSet<String>() {{
    -+ add("testReadMissingPartitionBasic");
    -+ }});
    -+ }};
    -+
    -+ private final String storageFormat;
    -+
    -+ @Parameterized.Parameters
    -+ public static Collection<Object[]> generateParameters() {
    -+ return StorageFormats.names();
    -+ }
    -+
    -+ public TestHCatLoaderWithProps(String storageFormat) {
    -+ this.storageFormat = storageFormat;
    -+ }
    -+
    -+ private void dropTable(String tablename) throws IOException, CommandNeedRetryException {
    -+ dropTable(tablename, driver);
    -+ }
    -+
    -+ static void dropTable(String tablename, Driver driver) throws IOException, CommandNeedRetryException {
    -+ driver.run("drop table if exists " + tablename);
    -+ }
    -+
    -+ private void createTable(String tablename, String schema, String partitionedBy) throws IOException, CommandNeedRetryException {
    -+ createTable(tablename, schema, partitionedBy, driver, storageFormat);
    -+ }
    -+
    -+ static void createTable(String tablename, String schema, String partitionedBy, Driver driver, String storageFormat)
    -+ throws IOException, CommandNeedRetryException {
    -+ String createTable;
    -+ createTable = "create table " + tablename + "(" + schema + ") ";
    -+ if ((partitionedBy != null) && (!partitionedBy.trim().isEmpty())) {
    -+ createTable = createTable + "partitioned by (" + partitionedBy + ") ";
    -+ }
    -+ createTable = createTable + "stored as " +storageFormat;
    -+ executeStatementOnDriver(createTable, driver);
    -+ }
    -+
    -+ private void createTable(String tablename, String schema) throws IOException, CommandNeedRetryException {
    -+ createTable(tablename, schema, null);
    -+ }
    -+
    -+ /**
    -+ * Execute Hive CLI statement
    -+ * @param cmd arbitrary statement to execute
    -+ */
    -+ static void executeStatementOnDriver(String cmd, Driver driver) throws IOException, CommandNeedRetryException {
    -+ LOG.debug("Executing: " + cmd);
    -+ CommandProcessorResponse cpr = driver.run(cmd);
    -+ if(cpr.getResponseCode() != 0) {
    -+ throw new IOException("Failed to execute \"" + cmd + "\". Driver returned " + cpr.getResponseCode() + " Error: " + cpr.getErrorMessage());
    -+ }
    -+ }
    -+
    -+ @Before
    -+ public void setup() throws Exception {
    -+ File f = new File(TEST_WAREHOUSE_DIR);
    -+ if (f.exists()) {
    -+ FileUtil.fullyDelete(f);
    -+ }
    -+ if (!(new File(TEST_WAREHOUSE_DIR).mkdirs())) {
    -+ throw new RuntimeException("Could not create " + TEST_WAREHOUSE_DIR);
    -+ }
    -+
    -+ HiveConf hiveConf = new HiveConf(this.getClass());
    -+ hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    -+ hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    -+ hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    -+ hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
    -+ hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
    -+
    -+ if (Shell.WINDOWS) {
    -+ WindowsPathUtil.convertPathsFromWindowsToHdfs(hiveConf);
    -+ }
    -+
    -+ driver = new Driver(hiveConf);
    -+ SessionState.start(new CliSessionState(hiveConf));
    -+
    -+ createTable(BASIC_TABLE, "a int, b string");
    -+ createTable(PARTITIONED_TABLE, "a int, b string", "bkt string");
    -+
    -+ int LOOP_SIZE = 3;
    -+ String[] input = new String[LOOP_SIZE * LOOP_SIZE];
    -+ basicInputData = new HashMap<Integer, Pair<Integer, String>>();
    -+ int k = 0;
    -+ for (int i = 1; i <= LOOP_SIZE; i++) {
    -+ String si = i + "";
    -+ for (int j = 1; j <= LOOP_SIZE; j++) {
    -+ String sj = "S" + j + "S";
    -+ input[k] = si + "\t" + sj;
    -+ basicInputData.put(k, new Pair<Integer, String>(i, sj));
    -+ k++;
    -+ }
    -+ }
    -+ HcatTestUtils.createTestDataFile(BASIC_FILE_NAME, input);
    -+
    -+ PigServer server = new PigServer(ExecType.LOCAL);
    -+ server.setBatchOn();
    -+ int i = 0;
    -+ server.registerQuery("A = load '" + BASIC_FILE_NAME + "' as (a:int, b:chararray);", ++i);
    -+
    -+ server.registerQuery("store A into '" + BASIC_TABLE + "' using org.apache.hive.hcatalog.pig.HCatStorer();", ++i);
    -+ server.registerQuery("B = foreach A generate a,b;", ++i);
    -+ server.registerQuery("B2 = filter B by a < 2;", ++i);
    -+ server.registerQuery("store B2 into '" + PARTITIONED_TABLE + "' using org.apache.hive.hcatalog.pig.HCatStorer('bkt=0');", ++i);
    -+
    -+ server.registerQuery("C = foreach A generate a,b;", ++i);
    -+ server.registerQuery("C2 = filter C by a >= 2;", ++i);
    -+ server.registerQuery("store C2 into '" + PARTITIONED_TABLE + "' using org.apache.hive.hcatalog.pig.HCatStorer('bkt=1');", ++i);
    -+
    -+ server.executeBatch();
    -+ }
    -+
    -+ @After
    -+ public void tearDown() throws Exception {
    -+ try {
    -+ if (driver != null) {
    -+ dropTable(BASIC_TABLE);
    -+ dropTable(PARTITIONED_TABLE);
    -+ }
    -+ } finally {
    -+ FileUtils.deleteDirectory(new File(TEST_DATA_DIR));
    -+ }
    -+ }
    -+
    -+ @Test
    -+ public void testReadMissingPartitionBasic() throws IOException, CommandNeedRetryException {
    -+ assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS));
    -+ Properties pigProperties = PropertiesUtil.loadDefaultProperties();
    -+ pigProperties.setProperty("hcat.input.ignore.invalid.path", "true");
    -+ PigServer server = new PigServer(ExecType.LOCAL, pigProperties);
    -+
    -+ File removedPartitionDir = new File(TEST_WAREHOUSE_DIR + "/" + PARTITIONED_TABLE + "/bkt=0");
    -+ if (!removeDirectory(removedPartitionDir)) {
    -+ System.out.println("Test did not run because its environment could not be set.");
    -+ return;
    -+ }
    -+ driver.run("select * from " + PARTITIONED_TABLE);
    -+ ArrayList<String> valuesReadFromHiveDriver = new ArrayList<String>();
    -+ driver.getResults(valuesReadFromHiveDriver);
    -+ assertTrue(valuesReadFromHiveDriver.size() == 6);
    -+
    -+ server.registerQuery("W = load '" + PARTITIONED_TABLE + "' using org.apache.hive.hcatalog.pig.HCatLoader();");
    -+ Schema dumpedWSchema = server.dumpSchema("W");
    -+ List<FieldSchema> Wfields = dumpedWSchema.getFields();
    -+ assertEquals(3, Wfields.size());
    -+ assertTrue(Wfields.get(0).alias.equalsIgnoreCase("a"));
    -+ assertTrue(Wfields.get(0).type == DataType.INTEGER);
    -+ assertTrue(Wfields.get(1).alias.equalsIgnoreCase("b"));
    -+ assertTrue(Wfields.get(1).type == DataType.CHARARRAY);
    -+ assertTrue(Wfields.get(2).alias.equalsIgnoreCase("bkt"));
    -+ assertTrue(Wfields.get(2).type == DataType.CHARARRAY);
    -+
    -+ Iterator<Tuple> WIter = server.openIterator("W");
    -+ Collection<Pair<Integer, String>> valuesRead = new ArrayList<Pair<Integer, String>>();
    -+ while (WIter.hasNext()) {
    -+ Tuple t = WIter.next();
    -+ assertTrue(t.size() == 3);
    -+ assertNotNull(t.get(0));
    -+ assertNotNull(t.get(1));
    -+ assertNotNull(t.get(2));
    -+ assertTrue(t.get(0).getClass() == Integer.class);
    -+ assertTrue(t.get(1).getClass() == String.class);
    -+ assertTrue(t.get(2).getClass() == String.class);
    -+ valuesRead.add(new Pair<Integer, String>((Integer) t.get(0), (String) t.get(1)));
    -+ // the returned partition value is always 1
    -+ assertEquals("1", t.get(2));
    -+ }
    -+ assertEquals(valuesReadFromHiveDriver.size(), valuesRead.size());
    -+
    -+ server.registerQuery("P1 = load '" + PARTITIONED_TABLE + "' using org.apache.hive.hcatalog.pig.HCatLoader();");
    -+ server.registerQuery("P1filter = filter P1 by bkt == '0';");
    -+ Iterator<Tuple> P1Iter = server.openIterator("P1filter");
    -+ assertFalse(P1Iter.hasNext());
    -+
    -+ server.registerQuery("P2 = load '" + PARTITIONED_TABLE + "' using org.apache.hive.hcatalog.pig.HCatLoader();");
    -+ server.registerQuery("P2filter = filter P2 by bkt == '1';");
    -+ Iterator<Tuple> P2Iter = server.openIterator("P2filter");
    -+ int count2 = 0;
    -+ while (P2Iter.hasNext()) {
    -+ Tuple t = P2Iter.next();
    -+ assertEquals("1", t.get(2));
    -+ assertTrue(((Integer) t.get(0)) > 1);
    -+ count2++;
    -+ }
    -+ assertEquals(6, count2);
    -+ }
    -+
    -+ private static boolean removeDirectory(File dir) {
    -+ boolean success = false;
    -+ if (dir.isDirectory()) {
    -+ File[] files = dir.listFiles();
    -+ if (files != null && files.length > 0) {
    -+ for (File file : files) {
    -+ success = removeDirectory(file);
    -+ if (!success) {
    -+ return false;
    -+ }
    -+ }
    -+ }
    -+ success = dir.delete();
    -+ } else {
    -+ success = dir.delete();
    -+ }
    -+ return success;
    -+ }
    -+}
  • Spena at May 6, 2016 at 8:42 pm
    HIVE-13674: usingTezAm field not required in LLAP SubmitWorkRequestProto


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4847f652
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4847f652
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4847f652

    Branch: refs/heads/java8
    Commit: 4847f652804f476bbc969716fe7643d8b20eba8c
    Parents: bc75d72
    Author: Jason Dere <jdere@hortonworks.com>
    Authored: Tue May 3 18:38:07 2016 -0700
    Committer: Jason Dere <jdere@hortonworks.com>
    Committed: Tue May 3 18:38:07 2016 -0700

    ----------------------------------------------------------------------
      .../ext/LlapTaskUmbilicalExternalClient.java | 4 +-
      .../daemon/rpc/LlapDaemonProtocolProtos.java | 230 ++++++-------------
      .../src/protobuf/LlapDaemonProtocol.proto | 8 -
      .../hadoop/hive/llap/LlapBaseInputFormat.java | 1 -
      .../llap/daemon/impl/TaskRunnerCallable.java | 3 -
      5 files changed, 71 insertions(+), 175 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/4847f652/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
    ----------------------------------------------------------------------
    diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java b/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
    index 8598bc8..fe2fd7c 100644
    --- a/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
    +++ b/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
    @@ -123,12 +123,10 @@ public class LlapTaskUmbilicalExternalClient extends AbstractService {


        /**
    - * Submit the work for actual execution. This should always have the usingTezAm flag disabled
    + * Submit the work for actual execution.
         * @param submitWorkRequestProto
         */
        public void submitWork(final SubmitWorkRequestProto submitWorkRequestProto, String llapHost, int llapPort, List<TezEvent> tezEvents) {
    - Preconditions.checkArgument(submitWorkRequestProto.getUsingTezAm() == false);
    -
          // Register the pending events to be sent for this spec.
          String fragmentId = submitWorkRequestProto.getFragmentSpec().getFragmentIdentifierString();
          PendingEventData pendingEventData = new PendingEventData(

    http://git-wip-us.apache.org/repos/asf/hive/blob/4847f652/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
    ----------------------------------------------------------------------
    diff --git a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
    index 653e7e0..6a20031 100644
    --- a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
    +++ b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
    @@ -1,5 +1,5 @@
      // Generated by the protocol buffer compiler. DO NOT EDIT!
    -// source: LlapDaemonProtocol.proto
    +// source: llap-common/src/protobuf/LlapDaemonProtocol.proto

      package org.apache.hadoop.hive.llap.daemon.rpc;

    @@ -7334,16 +7334,6 @@ public final class LlapDaemonProtocolProtos {
           * <code>optional .FragmentRuntimeInfo fragment_runtime_info = 10;</code>
           */
          org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfoOrBuilder getFragmentRuntimeInfoOrBuilder();
    -
    - // optional bool usingTezAm = 11 [default = true];
    - /**
    - * <code>optional bool usingTezAm = 11 [default = true];</code>
    - */
    - boolean hasUsingTezAm();
    - /**
    - * <code>optional bool usingTezAm = 11 [default = true];</code>
    - */
    - boolean getUsingTezAm();
        }
        /**
         * Protobuf type {@code SubmitWorkRequestProto}
    @@ -7462,11 +7452,6 @@ public final class LlapDaemonProtocolProtos {
                    bitField0_ |= 0x00000200;
                    break;
                  }
    - case 88: {
    - bitField0_ |= 0x00000400;
    - usingTezAm_ = input.readBool();
    - break;
    - }
                }
              }
            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    @@ -7814,22 +7799,6 @@ public final class LlapDaemonProtocolProtos {
            return fragmentRuntimeInfo_;
          }

    - // optional bool usingTezAm = 11 [default = true];
    - public static final int USINGTEZAM_FIELD_NUMBER = 11;
    - private boolean usingTezAm_;
    - /**
    - * <code>optional bool usingTezAm = 11 [default = true];</code>
    - */
    - public boolean hasUsingTezAm() {
    - return ((bitField0_ & 0x00000400) == 0x00000400);
    - }
    - /**
    - * <code>optional bool usingTezAm = 11 [default = true];</code>
    - */
    - public boolean getUsingTezAm() {
    - return usingTezAm_;
    - }
    -
          private void initFields() {
            containerIdString_ = "";
            amHost_ = "";
    @@ -7841,7 +7810,6 @@ public final class LlapDaemonProtocolProtos {
            appAttemptNumber_ = 0;
            fragmentSpec_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto.getDefaultInstance();
            fragmentRuntimeInfo_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.getDefaultInstance();
    - usingTezAm_ = true;
          }
          private byte memoizedIsInitialized = -1;
          public final boolean isInitialized() {
    @@ -7885,9 +7853,6 @@ public final class LlapDaemonProtocolProtos {
            if (((bitField0_ & 0x00000200) == 0x00000200)) {
              output.writeMessage(10, fragmentRuntimeInfo_);
            }
    - if (((bitField0_ & 0x00000400) == 0x00000400)) {
    - output.writeBool(11, usingTezAm_);
    - }
            getUnknownFields().writeTo(output);
          }

    @@ -7937,10 +7902,6 @@ public final class LlapDaemonProtocolProtos {
              size += com.google.protobuf.CodedOutputStream
                .computeMessageSize(10, fragmentRuntimeInfo_);
            }
    - if (((bitField0_ & 0x00000400) == 0x00000400)) {
    - size += com.google.protobuf.CodedOutputStream
    - .computeBoolSize(11, usingTezAm_);
    - }
            size += getUnknownFields().getSerializedSize();
            memoizedSerializedSize = size;
            return size;
    @@ -8014,11 +7975,6 @@ public final class LlapDaemonProtocolProtos {
              result = result && getFragmentRuntimeInfo()
                  .equals(other.getFragmentRuntimeInfo());
            }
    - result = result && (hasUsingTezAm() == other.hasUsingTezAm());
    - if (hasUsingTezAm()) {
    - result = result && (getUsingTezAm()
    - == other.getUsingTezAm());
    - }
            result = result &&
                getUnknownFields().equals(other.getUnknownFields());
            return result;
    @@ -8072,10 +8028,6 @@ public final class LlapDaemonProtocolProtos {
              hash = (37 * hash) + FRAGMENT_RUNTIME_INFO_FIELD_NUMBER;
              hash = (53 * hash) + getFragmentRuntimeInfo().hashCode();
            }
    - if (hasUsingTezAm()) {
    - hash = (37 * hash) + USINGTEZAM_FIELD_NUMBER;
    - hash = (53 * hash) + hashBoolean(getUsingTezAm());
    - }
            hash = (29 * hash) + getUnknownFields().hashCode();
            memoizedHashCode = hash;
            return hash;
    @@ -8215,8 +8167,6 @@ public final class LlapDaemonProtocolProtos {
                fragmentRuntimeInfoBuilder_.clear();
              }
              bitField0_ = (bitField0_ & ~0x00000200);
    - usingTezAm_ = true;
    - bitField0_ = (bitField0_ & ~0x00000400);
              return this;
            }

    @@ -8293,10 +8243,6 @@ public final class LlapDaemonProtocolProtos {
              } else {
                result.fragmentRuntimeInfo_ = fragmentRuntimeInfoBuilder_.build();
              }
    - if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
    - to_bitField0_ |= 0x00000400;
    - }
    - result.usingTezAm_ = usingTezAm_;
              result.bitField0_ = to_bitField0_;
              onBuilt();
              return result;
    @@ -8353,9 +8299,6 @@ public final class LlapDaemonProtocolProtos {
              if (other.hasFragmentRuntimeInfo()) {
                mergeFragmentRuntimeInfo(other.getFragmentRuntimeInfo());
              }
    - if (other.hasUsingTezAm()) {
    - setUsingTezAm(other.getUsingTezAm());
    - }
              this.mergeUnknownFields(other.getUnknownFields());
              return this;
            }
    @@ -9089,39 +9032,6 @@ public final class LlapDaemonProtocolProtos {
              return fragmentRuntimeInfoBuilder_;
            }

    - // optional bool usingTezAm = 11 [default = true];
    - private boolean usingTezAm_ = true;
    - /**
    - * <code>optional bool usingTezAm = 11 [default = true];</code>
    - */
    - public boolean hasUsingTezAm() {
    - return ((bitField0_ & 0x00000400) == 0x00000400);
    - }
    - /**
    - * <code>optional bool usingTezAm = 11 [default = true];</code>
    - */
    - public boolean getUsingTezAm() {
    - return usingTezAm_;
    - }
    - /**
    - * <code>optional bool usingTezAm = 11 [default = true];</code>
    - */
    - public Builder setUsingTezAm(boolean value) {
    - bitField0_ |= 0x00000400;
    - usingTezAm_ = value;
    - onChanged();
    - return this;
    - }
    - /**
    - * <code>optional bool usingTezAm = 11 [default = true];</code>
    - */
    - public Builder clearUsingTezAm() {
    - bitField0_ = (bitField0_ & ~0x00000400);
    - usingTezAm_ = true;
    - onChanged();
    - return this;
    - }
    -
            // @@protoc_insertion_point(builder_scope:SubmitWorkRequestProto)
          }

    @@ -14455,74 +14365,74 @@ public final class LlapDaemonProtocolProtos {
            descriptor;
        static {
          java.lang.String[] descriptorData = {
    - "\n\030LlapDaemonProtocol.proto\"9\n\020UserPayloa" +
    - "dProto\022\024\n\014user_payload\030\001 \001(\014\022\017\n\007version\030" +
    - "\002 \001(\005\"j\n\025EntityDescriptorProto\022\022\n\nclass_" +
    - "name\030\001 \001(\t\022\'\n\014user_payload\030\002 \001(\0132\021.UserP" +
    - "ayloadProto\022\024\n\014history_text\030\003 \001(\014\"x\n\013IOS" +
    - "pecProto\022\035\n\025connected_vertex_name\030\001 \001(\t\022" +
    - "-\n\rio_descriptor\030\002 \001(\0132\026.EntityDescripto" +
    - "rProto\022\033\n\023physical_edge_count\030\003 \001(\005\"z\n\023G" +
    - "roupInputSpecProto\022\022\n\ngroup_name\030\001 \001(\t\022\026" +
    - "\n\016group_vertices\030\002 \003(\t\0227\n\027merged_input_d",
    - "escriptor\030\003 \001(\0132\026.EntityDescriptorProto\"" +
    - "\353\002\n\021FragmentSpecProto\022\"\n\032fragment_identi" +
    - "fier_string\030\001 \001(\t\022\020\n\010dag_name\030\002 \001(\t\022\016\n\006d" +
    - "ag_id\030\013 \001(\005\022\023\n\013vertex_name\030\003 \001(\t\0224\n\024proc" +
    - "essor_descriptor\030\004 \001(\0132\026.EntityDescripto" +
    - "rProto\022!\n\013input_specs\030\005 \003(\0132\014.IOSpecProt" +
    - "o\022\"\n\014output_specs\030\006 \003(\0132\014.IOSpecProto\0221\n" +
    - "\023grouped_input_specs\030\007 \003(\0132\024.GroupInputS" +
    - "pecProto\022\032\n\022vertex_parallelism\030\010 \001(\005\022\027\n\017" +
    - "fragment_number\030\t \001(\005\022\026\n\016attempt_number\030",
    - "\n \001(\005\"\344\001\n\023FragmentRuntimeInfo\022#\n\033num_sel" +
    - "f_and_upstream_tasks\030\001 \001(\005\022-\n%num_self_a" +
    - "nd_upstream_completed_tasks\030\002 \001(\005\022\033\n\023wit" +
    - "hin_dag_priority\030\003 \001(\005\022\026\n\016dag_start_time" +
    - "\030\004 \001(\003\022 \n\030first_attempt_start_time\030\005 \001(\003" +
    - "\022\"\n\032current_attempt_start_time\030\006 \001(\003\"F\n\024" +
    - "QueryIdentifierProto\022\026\n\016app_identifier\030\001" +
    - " \001(\t\022\026\n\016dag_identifier\030\002 \001(\005\"\320\002\n\026SubmitW" +
    - "orkRequestProto\022\033\n\023container_id_string\030\001" +
    - " \001(\t\022\017\n\007am_host\030\002 \001(\t\022\017\n\007am_port\030\003 \001(\005\022\030",
    - "\n\020token_identifier\030\004 \001(\t\022\032\n\022credentials_" +
    - "binary\030\005 \001(\014\022\014\n\004user\030\006 \001(\t\022\035\n\025applicatio" +
    - "n_id_string\030\007 \001(\t\022\032\n\022app_attempt_number\030" +
    - "\010 \001(\005\022)\n\rfragment_spec\030\t \001(\0132\022.FragmentS" +
    - "pecProto\0223\n\025fragment_runtime_info\030\n \001(\0132" +
    - "\024.FragmentRuntimeInfo\022\030\n\nusingTezAm\030\013 \001(" +
    - "\010:\004true\"J\n\027SubmitWorkResponseProto\022/\n\020su" +
    - "bmission_state\030\001 \001(\0162\025.SubmissionStatePr" +
    - "oto\"\205\001\n\036SourceStateUpdatedRequestProto\022/" +
    - "\n\020query_identifier\030\001 \001(\0132\025.QueryIdentifi",
    - "erProto\022\020\n\010src_name\030\002 \001(\t\022 \n\005state\030\003 \001(\016" +
    - "2\021.SourceStateProto\"!\n\037SourceStateUpdate" +
    - "dResponseProto\"w\n\031QueryCompleteRequestPr" +
    - "oto\022\020\n\010query_id\030\001 \001(\t\022/\n\020query_identifie" +
    - "r\030\002 \001(\0132\025.QueryIdentifierProto\022\027\n\014delete" +
    - "_delay\030\004 \001(\003:\0010\"\034\n\032QueryCompleteResponse" +
    - "Proto\"t\n\035TerminateFragmentRequestProto\022/" +
    - "\n\020query_identifier\030\001 \001(\0132\025.QueryIdentifi" +
    - "erProto\022\"\n\032fragment_identifier_string\030\002 " +
    - "\001(\t\" \n\036TerminateFragmentResponseProto\"\026\n",
    - "\024GetTokenRequestProto\"&\n\025GetTokenRespons" +
    - "eProto\022\r\n\005token\030\001 \001(\014*2\n\020SourceStateProt" +
    - "o\022\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNNING\020\002*E\n\024Sub" +
    - "missionStateProto\022\014\n\010ACCEPTED\020\001\022\014\n\010REJEC" +
    - "TED\020\002\022\021\n\rEVICTED_OTHER\020\0032\316\002\n\022LlapDaemonP" +
    - "rotocol\022?\n\nsubmitWork\022\027.SubmitWorkReques" +
    - "tProto\032\030.SubmitWorkResponseProto\022W\n\022sour" +
    - "ceStateUpdated\022\037.SourceStateUpdatedReque" +
    - "stProto\032 .SourceStateUpdatedResponseProt" +
    - "o\022H\n\rqueryComplete\022\032.QueryCompleteReques",
    - "tProto\032\033.QueryCompleteResponseProto\022T\n\021t" +
    - "erminateFragment\022\036.TerminateFragmentRequ" +
    - "estProto\032\037.TerminateFragmentResponseProt" +
    - "o2]\n\026LlapManagementProtocol\022C\n\022getDelega" +
    - "tionToken\022\025.GetTokenRequestProto\032\026.GetTo" +
    - "kenResponseProtoBH\n&org.apache.hadoop.hi" +
    - "ve.llap.daemon.rpcB\030LlapDaemonProtocolPr" +
    - "otos\210\001\001\240\001\001"
    + "\n1llap-common/src/protobuf/LlapDaemonPro" +
    + "tocol.proto\"9\n\020UserPayloadProto\022\024\n\014user_" +
    + "payload\030\001 \001(\014\022\017\n\007version\030\002 \001(\005\"j\n\025Entity" +
    + "DescriptorProto\022\022\n\nclass_name\030\001 \001(\t\022\'\n\014u" +
    + "ser_payload\030\002 \001(\0132\021.UserPayloadProto\022\024\n\014" +
    + "history_text\030\003 \001(\014\"x\n\013IOSpecProto\022\035\n\025con" +
    + "nected_vertex_name\030\001 \001(\t\022-\n\rio_descripto" +
    + "r\030\002 \001(\0132\026.EntityDescriptorProto\022\033\n\023physi" +
    + "cal_edge_count\030\003 \001(\005\"z\n\023GroupInputSpecPr" +
    + "oto\022\022\n\ngroup_name\030\001 \001(\t\022\026\n\016group_vertice",
    + "s\030\002 \003(\t\0227\n\027merged_input_descriptor\030\003 \001(\013" +
    + "2\026.EntityDescriptorProto\"\353\002\n\021FragmentSpe" +
    + "cProto\022\"\n\032fragment_identifier_string\030\001 \001" +
    + "(\t\022\020\n\010dag_name\030\002 \001(\t\022\016\n\006dag_id\030\013 \001(\005\022\023\n\013" +
    + "vertex_name\030\003 \001(\t\0224\n\024processor_descripto" +
    + "r\030\004 \001(\0132\026.EntityDescriptorProto\022!\n\013input" +
    + "_specs\030\005 \003(\0132\014.IOSpecProto\022\"\n\014output_spe" +
    + "cs\030\006 \003(\0132\014.IOSpecProto\0221\n\023grouped_input_" +
    + "specs\030\007 \003(\0132\024.GroupInputSpecProto\022\032\n\022ver" +
    + "tex_parallelism\030\010 \001(\005\022\027\n\017fragment_number",
    + "\030\t \001(\005\022\026\n\016attempt_number\030\n \001(\005\"\344\001\n\023Fragm" +
    + "entRuntimeInfo\022#\n\033num_self_and_upstream_" +
    + "tasks\030\001 \001(\005\022-\n%num_self_and_upstream_com" +
    + "pleted_tasks\030\002 \001(\005\022\033\n\023within_dag_priorit" +
    + "y\030\003 \001(\005\022\026\n\016dag_start_time\030\004 \001(\003\022 \n\030first" +
    + "_attempt_start_time\030\005 \001(\003\022\"\n\032current_att" +
    + "empt_start_time\030\006 \001(\003\"F\n\024QueryIdentifier" +
    + "Proto\022\026\n\016app_identifier\030\001 \001(\t\022\026\n\016dag_ide" +
    + "ntifier\030\002 \001(\005\"\266\002\n\026SubmitWorkRequestProto" +
    + "\022\033\n\023container_id_string\030\001 \001(\t\022\017\n\007am_host",
    + "\030\002 \001(\t\022\017\n\007am_port\030\003 \001(\005\022\030\n\020token_identif" +
    + "ier\030\004 \001(\t\022\032\n\022credentials_binary\030\005 \001(\014\022\014\n" +
    + "\004user\030\006 \001(\t\022\035\n\025application_id_string\030\007 \001" +
    + "(\t\022\032\n\022app_attempt_number\030\010 \001(\005\022)\n\rfragme" +
    + "nt_spec\030\t \001(\0132\022.FragmentSpecProto\0223\n\025fra" +
    + "gment_runtime_info\030\n \001(\0132\024.FragmentRunti" +
    + "meInfo\"J\n\027SubmitWorkResponseProto\022/\n\020sub" +
    + "mission_state\030\001 \001(\0162\025.SubmissionStatePro" +
    + "to\"\205\001\n\036SourceStateUpdatedRequestProto\022/\n" +
    + "\020query_identifier\030\001 \001(\0132\025.QueryIdentifie",
    + "rProto\022\020\n\010src_name\030\002 \001(\t\022 \n\005state\030\003 \001(\0162" +
    + "\021.SourceStateProto\"!\n\037SourceStateUpdated" +
    + "ResponseProto\"w\n\031QueryCompleteRequestPro" +
    + "to\022\020\n\010query_id\030\001 \001(\t\022/\n\020query_identifier" +
    + "\030\002 \001(\0132\025.QueryIdentifierProto\022\027\n\014delete_" +
    + "delay\030\004 \001(\003:\0010\"\034\n\032QueryCompleteResponseP" +
    + "roto\"t\n\035TerminateFragmentRequestProto\022/\n" +
    + "\020query_identifier\030\001 \001(\0132\025.QueryIdentifie" +
    + "rProto\022\"\n\032fragment_identifier_string\030\002 \001" +
    + "(\t\" \n\036TerminateFragmentResponseProto\"\026\n\024",
    + "GetTokenRequestProto\"&\n\025GetTokenResponse" +
    + "Proto\022\r\n\005token\030\001 \001(\014*2\n\020SourceStateProto" +
    + "\022\017\n\013S_SUCCEEDED\020\001\022\r\n\tS_RUNNING\020\002*E\n\024Subm" +
    + "issionStateProto\022\014\n\010ACCEPTED\020\001\022\014\n\010REJECT" +
    + "ED\020\002\022\021\n\rEVICTED_OTHER\020\0032\316\002\n\022LlapDaemonPr" +
    + "otocol\022?\n\nsubmitWork\022\027.SubmitWorkRequest" +
    + "Proto\032\030.SubmitWorkResponseProto\022W\n\022sourc" +
    + "eStateUpdated\022\037.SourceStateUpdatedReques" +
    + "tProto\032 .SourceStateUpdatedResponseProto" +
    + "\022H\n\rqueryComplete\022\032.QueryCompleteRequest",
    + "Proto\032\033.QueryCompleteResponseProto\022T\n\021te" +
    + "rminateFragment\022\036.TerminateFragmentReque" +
    + "stProto\032\037.TerminateFragmentResponseProto" +
    + "2]\n\026LlapManagementProtocol\022C\n\022getDelegat" +
    + "ionToken\022\025.GetTokenRequestProto\032\026.GetTok" +
    + "enResponseProtoBH\n&org.apache.hadoop.hiv" +
    + "e.llap.daemon.rpcB\030LlapDaemonProtocolPro" +
    + "tos\210\001\001\240\001\001"
          };
          com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
            new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
    @@ -14576,7 +14486,7 @@ public final class LlapDaemonProtocolProtos {
                internal_static_SubmitWorkRequestProto_fieldAccessorTable = new
                  com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                    internal_static_SubmitWorkRequestProto_descriptor,
    - new java.lang.String[] { "ContainerIdString", "AmHost", "AmPort", "TokenIdentifier", "CredentialsBinary", "User", "ApplicationIdString", "AppAttemptNumber", "FragmentSpec", "FragmentRuntimeInfo", "UsingTezAm", });
    + new java.lang.String[] { "ContainerIdString", "AmHost", "AmPort", "TokenIdentifier", "CredentialsBinary", "User", "ApplicationIdString", "AppAttemptNumber", "FragmentSpec", "FragmentRuntimeInfo", });
                internal_static_SubmitWorkResponseProto_descriptor =
                  getDescriptor().getMessageTypes().get(8);
                internal_static_SubmitWorkResponseProto_fieldAccessorTable = new

    http://git-wip-us.apache.org/repos/asf/hive/blob/4847f652/llap-common/src/protobuf/LlapDaemonProtocol.proto
    ----------------------------------------------------------------------
    diff --git a/llap-common/src/protobuf/LlapDaemonProtocol.proto b/llap-common/src/protobuf/LlapDaemonProtocol.proto
    index e964c5f..944c96c 100644
    --- a/llap-common/src/protobuf/LlapDaemonProtocol.proto
    +++ b/llap-common/src/protobuf/LlapDaemonProtocol.proto
    @@ -91,7 +91,6 @@ message SubmitWorkRequestProto {
        optional int32 app_attempt_number = 8;
        optional FragmentSpecProto fragment_spec = 9;
        optional FragmentRuntimeInfo fragment_runtime_info = 10;
    - optional bool usingTezAm = 11 [default = true];
      }

      enum SubmissionStateProto {
    @@ -137,18 +136,11 @@ message GetTokenResponseProto {
        optional bytes token = 1;
      }

    -message SendEventsRequestProto {
    -}
    -
    -message SendEventsResponseProto {
    -}
    -
      service LlapDaemonProtocol {
        rpc submitWork(SubmitWorkRequestProto) returns (SubmitWorkResponseProto);
        rpc sourceStateUpdated(SourceStateUpdatedRequestProto) returns (SourceStateUpdatedResponseProto);
        rpc queryComplete(QueryCompleteRequestProto) returns (QueryCompleteResponseProto);
        rpc terminateFragment(TerminateFragmentRequestProto) returns (TerminateFragmentResponseProto);
    - rpc sendEvents(SendEventsRequestProto) returns (SendEventsResponseProto);
      }

      service LlapManagementProtocol {

    http://git-wip-us.apache.org/repos/asf/hive/blob/4847f652/llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapBaseInputFormat.java
    ----------------------------------------------------------------------
    diff --git a/llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapBaseInputFormat.java b/llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapBaseInputFormat.java
    index 10d14c0..8db2f88 100644
    --- a/llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapBaseInputFormat.java
    +++ b/llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapBaseInputFormat.java
    @@ -346,7 +346,6 @@ public class LlapBaseInputFormat<V extends WritableComparable> implements InputF
          runtimeInfo.setNumSelfAndUpstreamCompletedTasks(0);


    - builder.setUsingTezAm(false);
          builder.setFragmentRuntimeInfo(runtimeInfo.build());
          return builder.build();
        }

    http://git-wip-us.apache.org/repos/asf/hive/blob/4847f652/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
    index efd6f0a..4a33373 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
    @@ -108,7 +108,6 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
        private final String queryId;
        private final HadoopShim tezHadoopShim;
        private boolean shouldRunTask = true;
    - private final boolean withTezAm;
        final Stopwatch runtimeWatch = new Stopwatch();
        final Stopwatch killtimerWatch = new Stopwatch();
        private final AtomicBoolean isStarted = new AtomicBoolean(false);
    @@ -137,8 +136,6 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
          this.jobToken = TokenCache.getSessionToken(credentials);
          this.taskSpec = Converters.getTaskSpecfromProto(request.getFragmentSpec());
          this.amReporter = amReporter;
    - this.withTezAm = request.getUsingTezAm();
    - LOG.warn("ZZZ: DBG: usingTezAm=" + withTezAm);
          // Register with the AMReporter when the callable is setup. Unregister once it starts running.
          this.amReporter.registerTask(request.getAmHost(), request.getAmPort(),
              request.getUser(), jobToken, fragmentInfo.getQueryInfo().getQueryIdentifier());
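
    The net effect of this commit is that SubmitWorkRequestProto no longer carries the usingTezAm
    flag and LlapDaemonProtocol no longer declares the unused sendEvents RPC, so a caller builds the
    submit-work request purely from the remaining fields. The sketch below is illustrative only: the
    field values and the helper method name are placeholders, not taken from the patch; it simply
    mirrors the builder pattern visible in the LlapBaseInputFormat hunk above.

    // Illustrative sketch; values are placeholders, not taken from the patch.
    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo;
    import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;

    public class SubmitWorkRequestSketch {
      static SubmitWorkRequestProto buildRequest(String containerId, String amHost, int amPort,
          String user, String appIdString, int appAttempt) {
        FragmentRuntimeInfo runtimeInfo = FragmentRuntimeInfo.newBuilder()
            .setNumSelfAndUpstreamTasks(1)            // placeholder values
            .setNumSelfAndUpstreamCompletedTasks(0)
            .build();
        return SubmitWorkRequestProto.newBuilder()
            .setContainerIdString(containerId)
            .setAmHost(amHost)
            .setAmPort(amPort)
            .setUser(user)
            .setApplicationIdString(appIdString)
            .setAppAttemptNumber(appAttempt)
            // setUsingTezAm(...) is gone: field 11 was dropped from the .proto above
            .setFragmentRuntimeInfo(runtimeInfo)
            .build();
      }
    }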
  • Spena at May 6, 2016 at 8:42 pm
    HIVE-13646 make hive.optimize.sort.dynamic.partition compatible with ACID tables (Eugene Koifman, reviewed by Wei Zheng)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/87299662
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/87299662
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/87299662

    Branch: refs/heads/java8
    Commit: 8729966296a041b7ea952ba67f148d2c48c27749
    Parents: 70fe310
    Author: Eugene Koifman <ekoifman@hortonworks.com>
    Authored: Tue May 3 17:11:47 2016 -0700
    Committer: Eugene Koifman <ekoifman@hortonworks.com>
    Committed: Tue May 3 17:11:47 2016 -0700

    ----------------------------------------------------------------------
      .../hadoop/hive/ql/parse/SemanticAnalyzer.java | 1 -
      .../dynpart_sort_optimization_acid.q.out | 120 +++++++++++++++----
      2 files changed, 100 insertions(+), 21 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/87299662/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    index 06db7f9..2983d38 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    @@ -7030,7 +7030,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
          conf.setBoolVar(ConfVars.HIVEOPTREDUCEDEDUPLICATION, true);
          conf.setIntVar(ConfVars.HIVEOPTREDUCEDEDUPLICATIONMINREDUCER, 1);
          conf.set(AcidUtils.CONF_ACID_KEY, "true");
    - conf.setBoolVar(ConfVars.HIVEOPTSORTDYNAMICPARTITION, false);

          if (table.getNumBuckets() < 1) {
            throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, table.getTableName());
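
    For context, the removed line is what previously forced sort-dynamic-partition off whenever the
    analyzer planned an ACID update or delete; with it gone, the session value of
    hive.optimize.sort.dynamic.partition is honoured for ACID tables as well, which is what produces
    the extra sorting Map Reduce stage in the q.out changes below. A minimal sketch of the
    configuration side, using only the public HiveConf API (the class and the printed check are
    illustrative, not part of the patch):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

    public class SortDynPartAcidConf {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Before this patch, SemanticAnalyzer reset this to false for ACID operations; after it,
        // the value set here (or via SET hive.optimize.sort.dynamic.partition) reaches the planner.
        conf.setBoolVar(ConfVars.HIVEOPTSORTDYNAMICPARTITION, true);
        System.out.println(conf.getBoolVar(ConfVars.HIVEOPTSORTDYNAMICPARTITION));
      }
    }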

    http://git-wip-us.apache.org/repos/asf/hive/blob/87299662/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
    index eca29df..62399e3 100644
    --- a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
    +++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
    @@ -380,8 +380,9 @@ POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and ds
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    - Stage-2 depends on stages: Stage-0
    + Stage-2 depends on stages: Stage-1
    + Stage-0 depends on stages: Stage-2
    + Stage-3 depends on stages: Stage-0

      STAGE PLANS:
        Stage: Stage-1
    @@ -397,12 +398,31 @@ STAGE PLANS:
                      Reduce Output Operator
                        key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
                        sort order: +
    - Map-reduce partition columns: UDFToInteger(_col0) (type: int)
                        value expressions: _col3 (type: string)
            Reduce Operator Tree:
              Select Operator
    - expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), VALUE._col2 (type: string)
    - outputColumnNames: _col0, _col1, _col2, _col3
    + expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col2 (type: string)
    + outputColumnNames: _col0, _col3
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    +
    + Stage: Stage-2
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + Reduce Output Operator
    + key expressions: _col3 (type: string), '_bucket_number' (type: string), _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
    + sort order: +++
    + Map-reduce partition columns: _col3 (type: string)
    + value expressions: 'foo' (type: string), 'bar' (type: string)
    + Reduce Operator Tree:
    + Select Operator
    + expressions: KEY._col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY._col3 (type: string), KEY.'_bucket_number' (type: string)
    + outputColumnNames: _col0, _col1, _col2, _col3, '_bucket_number'
                File Output Operator
                  compressed: false
                  table:
    @@ -423,7 +443,7 @@ STAGE PLANS:
                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
                    name: default.acid

    - Stage: Stage-2
    + Stage: Stage-3
          Stats-Aggr Operator

      PREHOOK: query: update acid set value = 'bar' where key = 'foo' and ds in ('2008-04-08')
    @@ -875,8 +895,9 @@ POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and ds=
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    - Stage-2 depends on stages: Stage-0
    + Stage-2 depends on stages: Stage-1
    + Stage-0 depends on stages: Stage-2
    + Stage-3 depends on stages: Stage-0

      STAGE PLANS:
        Stage: Stage-1
    @@ -892,12 +913,31 @@ STAGE PLANS:
                      Reduce Output Operator
                        key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
                        sort order: +
    - Map-reduce partition columns: UDFToInteger(_col0) (type: int)
                        value expressions: _col4 (type: int)
            Reduce Operator Tree:
              Select Operator
    - expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), VALUE._col3 (type: int)
    - outputColumnNames: _col0, _col1, _col2, _col3, _col4
    + expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col3 (type: int)
    + outputColumnNames: _col0, _col4
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    +
    + Stage: Stage-2
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + Reduce Output Operator
    + key expressions: '2008-04-08' (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
    + sort order: ++++
    + Map-reduce partition columns: '2008-04-08' (type: string), _col4 (type: int)
    + value expressions: 'foo' (type: string), 'bar' (type: string)
    + Reduce Operator Tree:
    + Select Operator
    + expressions: KEY._col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY._col3 (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string)
    + outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number'
                File Output Operator
                  compressed: false
                  table:
    @@ -919,7 +959,7 @@ STAGE PLANS:
                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
                    name: default.acid

    - Stage: Stage-2
    + Stage: Stage-3
          Stats-Aggr Operator

      PREHOOK: query: update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr>=11
    @@ -1053,8 +1093,9 @@ POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and ds=
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    - Stage-2 depends on stages: Stage-0
    + Stage-2 depends on stages: Stage-1
    + Stage-0 depends on stages: Stage-2
    + Stage-3 depends on stages: Stage-0

      STAGE PLANS:
        Stage: Stage-1
    @@ -1070,7 +1111,6 @@ STAGE PLANS:
                      Reduce Output Operator
                        key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
                        sort order: +
    - Map-reduce partition columns: UDFToInteger(_col0) (type: int)
                        value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: int)
            Reduce Operator Tree:
              Select Operator
    @@ -1079,6 +1119,26 @@ STAGE PLANS:
                File Output Operator
                  compressed: false
                  table:
    + input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    +
    + Stage: Stage-2
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + Reduce Output Operator
    + key expressions: _col3 (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
    + sort order: ++++
    + Map-reduce partition columns: _col3 (type: string), _col4 (type: int)
    + value expressions: _col1 (type: string), _col2 (type: string)
    + Reduce Operator Tree:
    + Select Operator
    + expressions: KEY._col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY._col3 (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string)
    + outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number'
    + File Output Operator
    + compressed: false
    + table:
                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    @@ -1097,7 +1157,7 @@ STAGE PLANS:
                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
                    name: default.acid

    - Stage: Stage-2
    + Stage: Stage-3
          Stats-Aggr Operator

      PREHOOK: query: update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr=11
    @@ -1127,8 +1187,9 @@ POSTHOOK: query: explain update acid set value = 'bar' where key = 'foo' and ds=
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    - Stage-2 depends on stages: Stage-0
    + Stage-2 depends on stages: Stage-1
    + Stage-0 depends on stages: Stage-2
    + Stage-3 depends on stages: Stage-0

      STAGE PLANS:
        Stage: Stage-1
    @@ -1144,7 +1205,6 @@ STAGE PLANS:
                      Reduce Output Operator
                        key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
                        sort order: +
    - Map-reduce partition columns: UDFToInteger(_col0) (type: int)
                        value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: int)
            Reduce Operator Tree:
              Select Operator
    @@ -1153,6 +1213,26 @@ STAGE PLANS:
                File Output Operator
                  compressed: false
                  table:
    + input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    +
    + Stage: Stage-2
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + Reduce Output Operator
    + key expressions: _col3 (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
    + sort order: ++++
    + Map-reduce partition columns: _col3 (type: string), _col4 (type: int)
    + value expressions: _col1 (type: string), _col2 (type: string)
    + Reduce Operator Tree:
    + Select Operator
    + expressions: KEY._col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY._col3 (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string)
    + outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number'
    + File Output Operator
    + compressed: false
    + table:
                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    @@ -1171,7 +1251,7 @@ STAGE PLANS:
                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
                    name: default.acid

    - Stage: Stage-2
    + Stage: Stage-3
          Stats-Aggr Operator

      PREHOOK: query: update acid set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr>=11
  • Spena at May 6, 2016 at 8:42 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
    index 051c1f2..2a81c4b 100644
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
    @@ -80,6 +80,8 @@ public class ThriftHiveMetastore {

          public void create_table_with_constraints(Table tbl, List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException;

    + public void drop_constraint(DropConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
    +
          public void drop_table(String dbname, String name, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;

          public void drop_table_with_environment_context(String dbname, String name, boolean deleteData, EnvironmentContext environment_context) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
    @@ -376,6 +378,8 @@ public class ThriftHiveMetastore {

          public void create_table_with_constraints(Table tbl, List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

    + public void drop_constraint(DropConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
    +
          public void drop_table(String dbname, String name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

          public void drop_table_with_environment_context(String dbname, String name, boolean deleteData, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
    @@ -1218,6 +1222,32 @@ public class ThriftHiveMetastore {
            return;
          }

    + public void drop_constraint(DropConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
    + {
    + send_drop_constraint(req);
    + recv_drop_constraint();
    + }
    +
    + public void send_drop_constraint(DropConstraintRequest req) throws org.apache.thrift.TException
    + {
    + drop_constraint_args args = new drop_constraint_args();
    + args.setReq(req);
    + sendBase("drop_constraint", args);
    + }
    +
    + public void recv_drop_constraint() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
    + {
    + drop_constraint_result result = new drop_constraint_result();
    + receiveBase(result, "drop_constraint");
    + if (result.o1 != null) {
    + throw result.o1;
    + }
    + if (result.o3 != null) {
    + throw result.o3;
    + }
    + return;
    + }
    +
          public void drop_table(String dbname, String name, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
          {
            send_drop_table(dbname, name, deleteData);
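
    The added client methods follow the standard synchronous Thrift pattern: drop_constraint() wraps
    send_drop_constraint()/recv_drop_constraint() and rethrows NoSuchObjectException or MetaException
    from the result struct. A minimal usage sketch, assuming a metastore listening on localhost:9083
    over an unsecured binary-protocol transport and assuming DropConstraintRequest carries database,
    table and constraint names in that order (the endpoint and field values are illustrative, not
    part of this patch):

    import org.apache.hadoop.hive.metastore.api.DropConstraintRequest;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class DropConstraintSketch {
      public static void main(String[] args) throws Exception {
        // Assumed metastore endpoint; adjust for the actual deployment (and for SASL, if enabled).
        TTransport transport = new TSocket("localhost", 9083);
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        // Assumed field order: database name, table name, constraint name.
        DropConstraintRequest req = new DropConstraintRequest("default", "acid", "pk_acid");
        // Sends drop_constraint_args and decodes drop_constraint_result as shown above;
        // throws NoSuchObjectException if the constraint is unknown, MetaException otherwise.
        client.drop_constraint(req);
        transport.close();
      }
    }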
    @@ -5535,6 +5565,38 @@ public class ThriftHiveMetastore {
            }
          }

    + public void drop_constraint(DropConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
    + checkReady();
    + drop_constraint_call method_call = new drop_constraint_call(req, resultHandler, this, ___protocolFactory, ___transport);
    + this.___currentMethod = method_call;
    + ___manager.call(method_call);
    + }
    +
    + public static class drop_constraint_call extends org.apache.thrift.async.TAsyncMethodCall {
    + private DropConstraintRequest req;
    + public drop_constraint_call(DropConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
    + super(client, protocolFactory, transport, resultHandler, false);
    + this.req = req;
    + }
    +
    + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
    + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("drop_constraint", org.apache.thrift.protocol.TMessageType.CALL, 0));
    + drop_constraint_args args = new drop_constraint_args();
    + args.setReq(req);
    + args.write(prot);
    + prot.writeMessageEnd();
    + }
    +
    + public void getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException {
    + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
    + throw new IllegalStateException("Method call not finished!");
    + }
    + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
    + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
    + (new Client(prot)).recv_drop_constraint();
    + }
    + }
    +
          public void drop_table(String dbname, String name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
            checkReady();
            drop_table_call method_call = new drop_table_call(dbname, name, deleteData, resultHandler, this, ___protocolFactory, ___transport);
    @@ -10078,6 +10140,7 @@ public class ThriftHiveMetastore {
            processMap.put("create_table", new create_table());
            processMap.put("create_table_with_environment_context", new create_table_with_environment_context());
            processMap.put("create_table_with_constraints", new create_table_with_constraints());
    + processMap.put("drop_constraint", new drop_constraint());
            processMap.put("drop_table", new drop_table());
            processMap.put("drop_table_with_environment_context", new drop_table_with_environment_context());
            processMap.put("get_tables", new get_tables());
    @@ -10720,6 +10783,32 @@ public class ThriftHiveMetastore {
            }
          }

    + public static class drop_constraint<I extends Iface> extends org.apache.thrift.ProcessFunction<I, drop_constraint_args> {
    + public drop_constraint() {
    + super("drop_constraint");
    + }
    +
    + public drop_constraint_args getEmptyArgsInstance() {
    + return new drop_constraint_args();
    + }
    +
    + protected boolean isOneway() {
    + return false;
    + }
    +
    + public drop_constraint_result getResult(I iface, drop_constraint_args args) throws org.apache.thrift.TException {
    + drop_constraint_result result = new drop_constraint_result();
    + try {
    + iface.drop_constraint(args.req);
    + } catch (NoSuchObjectException o1) {
    + result.o1 = o1;
    + } catch (MetaException o3) {
    + result.o3 = o3;
    + }
    + return result;
    + }
    + }
    +
          public static class drop_table<I extends Iface> extends org.apache.thrift.ProcessFunction<I, drop_table_args> {
            public drop_table() {
              super("drop_table");
    @@ -13964,6 +14053,7 @@ public class ThriftHiveMetastore {
            processMap.put("create_table", new create_table());
            processMap.put("create_table_with_environment_context", new create_table_with_environment_context());
            processMap.put("create_table_with_constraints", new create_table_with_constraints());
    + processMap.put("drop_constraint", new drop_constraint());
            processMap.put("drop_table", new drop_table());
            processMap.put("drop_table_with_environment_context", new drop_table_with_environment_context());
            processMap.put("get_tables", new get_tables());
    @@ -15307,20 +15397,20 @@ public class ThriftHiveMetastore {
            }
          }

    - public static class drop_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_table_args, Void> {
    - public drop_table() {
    - super("drop_table");
    + public static class drop_constraint<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_constraint_args, Void> {
    + public drop_constraint() {
    + super("drop_constraint");
            }

    - public drop_table_args getEmptyArgsInstance() {
    - return new drop_table_args();
    + public drop_constraint_args getEmptyArgsInstance() {
    + return new drop_constraint_args();
            }

            public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
              return new AsyncMethodCallback<Void>() {
                public void onComplete(Void o) {
    - drop_table_result result = new drop_table_result();
    + drop_constraint_result result = new drop_constraint_result();
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -15332,7 +15422,7 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - drop_table_result result = new drop_table_result();
    + drop_constraint_result result = new drop_constraint_result();
                  if (e instanceof NoSuchObjectException) {
                              result.o1 = (NoSuchObjectException) e;
                              result.setO1IsSet(true);
    @@ -15363,25 +15453,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, drop_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    - iface.drop_table(args.dbname, args.name, args.deleteData,resultHandler);
    + public void start(I iface, drop_constraint_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    + iface.drop_constraint(args.req,resultHandler);
            }
          }

    - public static class drop_table_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_table_with_environment_context_args, Void> {
    - public drop_table_with_environment_context() {
    - super("drop_table_with_environment_context");
    + public static class drop_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_table_args, Void> {
    + public drop_table() {
    + super("drop_table");
            }

    - public drop_table_with_environment_context_args getEmptyArgsInstance() {
    - return new drop_table_with_environment_context_args();
    + public drop_table_args getEmptyArgsInstance() {
    + return new drop_table_args();
            }

            public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
              return new AsyncMethodCallback<Void>() {
                public void onComplete(Void o) {
    - drop_table_with_environment_context_result result = new drop_table_with_environment_context_result();
    + drop_table_result result = new drop_table_result();
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -15393,7 +15483,7 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - drop_table_with_environment_context_result result = new drop_table_with_environment_context_result();
    + drop_table_result result = new drop_table_result();
                  if (e instanceof NoSuchObjectException) {
                              result.o1 = (NoSuchObjectException) e;
                              result.setO1IsSet(true);
    @@ -15424,259 +15514,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, drop_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    - iface.drop_table_with_environment_context(args.dbname, args.name, args.deleteData, args.environment_context,resultHandler);
    - }
    - }
    -
    - public static class get_tables<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_tables_args, List<String>> {
    - public get_tables() {
    - super("get_tables");
    - }
    -
    - public get_tables_args getEmptyArgsInstance() {
    - return new get_tables_args();
    - }
    -
    - public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    - final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<List<String>>() {
    - public void onComplete(List<String> o) {
    - get_tables_result result = new get_tables_result();
    - result.success = o;
    - try {
    - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    - return;
    - } catch (Exception e) {
    - LOGGER.error("Exception writing to internal frame buffer", e);
    - }
    - fb.close();
    - }
    - public void onError(Exception e) {
    - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    - org.apache.thrift.TBase msg;
    - get_tables_result result = new get_tables_result();
    - if (e instanceof MetaException) {
    - result.o1 = (MetaException) e;
    - result.setO1IsSet(true);
    - msg = result;
    - }
    - else
    - {
    - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    - }
    - try {
    - fcall.sendResponse(fb,msg,msgType,seqid);
    - return;
    - } catch (Exception ex) {
    - LOGGER.error("Exception writing to internal frame buffer", ex);
    - }
    - fb.close();
    - }
    - };
    - }
    -
    - protected boolean isOneway() {
    - return false;
    - }
    -
    - public void start(I iface, get_tables_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
    - iface.get_tables(args.db_name, args.pattern,resultHandler);
    - }
    - }
    -
    - public static class get_table_meta<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_meta_args, List<TableMeta>> {
    - public get_table_meta() {
    - super("get_table_meta");
    - }
    -
    - public get_table_meta_args getEmptyArgsInstance() {
    - return new get_table_meta_args();
    - }
    -
    - public AsyncMethodCallback<List<TableMeta>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    - final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<List<TableMeta>>() {
    - public void onComplete(List<TableMeta> o) {
    - get_table_meta_result result = new get_table_meta_result();
    - result.success = o;
    - try {
    - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    - return;
    - } catch (Exception e) {
    - LOGGER.error("Exception writing to internal frame buffer", e);
    - }
    - fb.close();
    - }
    - public void onError(Exception e) {
    - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    - org.apache.thrift.TBase msg;
    - get_table_meta_result result = new get_table_meta_result();
    - if (e instanceof MetaException) {
    - result.o1 = (MetaException) e;
    - result.setO1IsSet(true);
    - msg = result;
    - }
    - else
    - {
    - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    - }
    - try {
    - fcall.sendResponse(fb,msg,msgType,seqid);
    - return;
    - } catch (Exception ex) {
    - LOGGER.error("Exception writing to internal frame buffer", ex);
    - }
    - fb.close();
    - }
    - };
    - }
    -
    - protected boolean isOneway() {
    - return false;
    - }
    -
    - public void start(I iface, get_table_meta_args args, org.apache.thrift.async.AsyncMethodCallback<List<TableMeta>> resultHandler) throws TException {
    - iface.get_table_meta(args.db_patterns, args.tbl_patterns, args.tbl_types,resultHandler);
    - }
    - }
    -
    - public static class get_all_tables<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_all_tables_args, List<String>> {
    - public get_all_tables() {
    - super("get_all_tables");
    - }
    -
    - public get_all_tables_args getEmptyArgsInstance() {
    - return new get_all_tables_args();
    - }
    -
    - public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    - final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<List<String>>() {
    - public void onComplete(List<String> o) {
    - get_all_tables_result result = new get_all_tables_result();
    - result.success = o;
    - try {
    - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    - return;
    - } catch (Exception e) {
    - LOGGER.error("Exception writing to internal frame buffer", e);
    - }
    - fb.close();
    - }
    - public void onError(Exception e) {
    - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    - org.apache.thrift.TBase msg;
    - get_all_tables_result result = new get_all_tables_result();
    - if (e instanceof MetaException) {
    - result.o1 = (MetaException) e;
    - result.setO1IsSet(true);
    - msg = result;
    - }
    - else
    - {
    - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    - }
    - try {
    - fcall.sendResponse(fb,msg,msgType,seqid);
    - return;
    - } catch (Exception ex) {
    - LOGGER.error("Exception writing to internal frame buffer", ex);
    - }
    - fb.close();
    - }
    - };
    - }
    -
    - protected boolean isOneway() {
    - return false;
    - }
    -
    - public void start(I iface, get_all_tables_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
    - iface.get_all_tables(args.db_name,resultHandler);
    - }
    - }
    -
    - public static class get_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_args, Table> {
    - public get_table() {
    - super("get_table");
    - }
    -
    - public get_table_args getEmptyArgsInstance() {
    - return new get_table_args();
    - }
    -
    - public AsyncMethodCallback<Table> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    - final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Table>() {
    - public void onComplete(Table o) {
    - get_table_result result = new get_table_result();
    - result.success = o;
    - try {
    - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    - return;
    - } catch (Exception e) {
    - LOGGER.error("Exception writing to internal frame buffer", e);
    - }
    - fb.close();
    - }
    - public void onError(Exception e) {
    - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    - org.apache.thrift.TBase msg;
    - get_table_result result = new get_table_result();
    - if (e instanceof MetaException) {
    - result.o1 = (MetaException) e;
    - result.setO1IsSet(true);
    - msg = result;
    - }
    - else if (e instanceof NoSuchObjectException) {
    - result.o2 = (NoSuchObjectException) e;
    - result.setO2IsSet(true);
    - msg = result;
    - }
    - else
    - {
    - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    - }
    - try {
    - fcall.sendResponse(fb,msg,msgType,seqid);
    - return;
    - } catch (Exception ex) {
    - LOGGER.error("Exception writing to internal frame buffer", ex);
    - }
    - fb.close();
    - }
    - };
    - }
    -
    - protected boolean isOneway() {
    - return false;
    - }
    -
    - public void start(I iface, get_table_args args, org.apache.thrift.async.AsyncMethodCallback<Table> resultHandler) throws TException {
    - iface.get_table(args.dbname, args.tbl_name,resultHandler);
    + public void start(I iface, drop_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    + iface.drop_table(args.dbname, args.name, args.deleteData,resultHandler);
            }
          }

    - public static class get_table_objects_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_objects_by_name_args, List<Table>> {
    - public get_table_objects_by_name() {
    - super("get_table_objects_by_name");
    + public static class drop_table_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_table_with_environment_context_args, Void> {
    + public drop_table_with_environment_context() {
    + super("drop_table_with_environment_context");
            }

    - public get_table_objects_by_name_args getEmptyArgsInstance() {
    - return new get_table_objects_by_name_args();
    + public drop_table_with_environment_context_args getEmptyArgsInstance() {
    + return new drop_table_with_environment_context_args();
            }

    - public AsyncMethodCallback<List<Table>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<List<Table>>() {
    - public void onComplete(List<Table> o) {
    - get_table_objects_by_name_result result = new get_table_objects_by_name_result();
    - result.success = o;
    + return new AsyncMethodCallback<Void>() {
    + public void onComplete(Void o) {
    + drop_table_with_environment_context_result result = new drop_table_with_environment_context_result();
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -15688,19 +15544,14 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - get_table_objects_by_name_result result = new get_table_objects_by_name_result();
    - if (e instanceof MetaException) {
    - result.o1 = (MetaException) e;
    + drop_table_with_environment_context_result result = new drop_table_with_environment_context_result();
    + if (e instanceof NoSuchObjectException) {
    + result.o1 = (NoSuchObjectException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof InvalidOperationException) {
    - result.o2 = (InvalidOperationException) e;
    - result.setO2IsSet(true);
    - msg = result;
    - }
    - else if (e instanceof UnknownDBException) {
    - result.o3 = (UnknownDBException) e;
    + else if (e instanceof MetaException) {
    + result.o3 = (MetaException) e;
                              result.setO3IsSet(true);
                              msg = result;
                  }
    @@ -15724,25 +15575,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, get_table_objects_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<List<Table>> resultHandler) throws TException {
    - iface.get_table_objects_by_name(args.dbname, args.tbl_names,resultHandler);
    + public void start(I iface, drop_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    + iface.drop_table_with_environment_context(args.dbname, args.name, args.deleteData, args.environment_context,resultHandler);
            }
          }

    - public static class get_table_names_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_names_by_filter_args, List<String>> {
    - public get_table_names_by_filter() {
    - super("get_table_names_by_filter");
    + public static class get_tables<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_tables_args, List<String>> {
    + public get_tables() {
    + super("get_tables");
            }

    - public get_table_names_by_filter_args getEmptyArgsInstance() {
    - return new get_table_names_by_filter_args();
    + public get_tables_args getEmptyArgsInstance() {
    + return new get_tables_args();
            }

            public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
              return new AsyncMethodCallback<List<String>>() {
                public void onComplete(List<String> o) {
    - get_table_names_by_filter_result result = new get_table_names_by_filter_result();
    + get_tables_result result = new get_tables_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -15755,22 +15606,12 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - get_table_names_by_filter_result result = new get_table_names_by_filter_result();
    + get_tables_result result = new get_tables_result();
                  if (e instanceof MetaException) {
                              result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof InvalidOperationException) {
    - result.o2 = (InvalidOperationException) e;
    - result.setO2IsSet(true);
    - msg = result;
    - }
    - else if (e instanceof UnknownDBException) {
    - result.o3 = (UnknownDBException) e;
    - result.setO3IsSet(true);
    - msg = result;
    - }
                   else
                  {
                    msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    @@ -15791,25 +15632,26 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, get_table_names_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
    - iface.get_table_names_by_filter(args.dbname, args.filter, args.max_tables,resultHandler);
    + public void start(I iface, get_tables_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
    + iface.get_tables(args.db_name, args.pattern,resultHandler);
            }
          }

    - public static class alter_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_args, Void> {
    - public alter_table() {
    - super("alter_table");
    + public static class get_table_meta<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_meta_args, List<TableMeta>> {
    + public get_table_meta() {
    + super("get_table_meta");
            }

    - public alter_table_args getEmptyArgsInstance() {
    - return new alter_table_args();
    + public get_table_meta_args getEmptyArgsInstance() {
    + return new get_table_meta_args();
            }

    - public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<List<TableMeta>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Void>() {
    - public void onComplete(Void o) {
    - alter_table_result result = new alter_table_result();
    + return new AsyncMethodCallback<List<TableMeta>>() {
    + public void onComplete(List<TableMeta> o) {
    + get_table_meta_result result = new get_table_meta_result();
    + result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -15821,17 +15663,12 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - alter_table_result result = new alter_table_result();
    - if (e instanceof InvalidOperationException) {
    - result.o1 = (InvalidOperationException) e;
    + get_table_meta_result result = new get_table_meta_result();
    + if (e instanceof MetaException) {
    + result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof MetaException) {
    - result.o2 = (MetaException) e;
    - result.setO2IsSet(true);
    - msg = result;
    - }
                   else
                  {
                    msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    @@ -15852,25 +15689,26 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, alter_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    - iface.alter_table(args.dbname, args.tbl_name, args.new_tbl,resultHandler);
    + public void start(I iface, get_table_meta_args args, org.apache.thrift.async.AsyncMethodCallback<List<TableMeta>> resultHandler) throws TException {
    + iface.get_table_meta(args.db_patterns, args.tbl_patterns, args.tbl_types,resultHandler);
            }
          }

    - public static class alter_table_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_environment_context_args, Void> {
    - public alter_table_with_environment_context() {
    - super("alter_table_with_environment_context");
    + public static class get_all_tables<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_all_tables_args, List<String>> {
    + public get_all_tables() {
    + super("get_all_tables");
            }

    - public alter_table_with_environment_context_args getEmptyArgsInstance() {
    - return new alter_table_with_environment_context_args();
    + public get_all_tables_args getEmptyArgsInstance() {
    + return new get_all_tables_args();
            }

    - public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Void>() {
    - public void onComplete(Void o) {
    - alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
    + return new AsyncMethodCallback<List<String>>() {
    + public void onComplete(List<String> o) {
    + get_all_tables_result result = new get_all_tables_result();
    + result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -15882,17 +15720,12 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
    - if (e instanceof InvalidOperationException) {
    - result.o1 = (InvalidOperationException) e;
    + get_all_tables_result result = new get_all_tables_result();
    + if (e instanceof MetaException) {
    + result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof MetaException) {
    - result.o2 = (MetaException) e;
    - result.setO2IsSet(true);
    - msg = result;
    - }
                   else
                  {
                    msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    @@ -15913,25 +15746,26 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, alter_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    - iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context,resultHandler);
    + public void start(I iface, get_all_tables_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
    + iface.get_all_tables(args.db_name,resultHandler);
            }
          }

    - public static class alter_table_with_cascade<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_cascade_args, Void> {
    - public alter_table_with_cascade() {
    - super("alter_table_with_cascade");
    + public static class get_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_args, Table> {
    + public get_table() {
    + super("get_table");
            }

    - public alter_table_with_cascade_args getEmptyArgsInstance() {
    - return new alter_table_with_cascade_args();
    + public get_table_args getEmptyArgsInstance() {
    + return new get_table_args();
            }

    - public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<Table> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Void>() {
    - public void onComplete(Void o) {
    - alter_table_with_cascade_result result = new alter_table_with_cascade_result();
    + return new AsyncMethodCallback<Table>() {
    + public void onComplete(Table o) {
    + get_table_result result = new get_table_result();
    + result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -15943,14 +15777,14 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - alter_table_with_cascade_result result = new alter_table_with_cascade_result();
    - if (e instanceof InvalidOperationException) {
    - result.o1 = (InvalidOperationException) e;
    + get_table_result result = new get_table_result();
    + if (e instanceof MetaException) {
    + result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof MetaException) {
    - result.o2 = (MetaException) e;
    + else if (e instanceof NoSuchObjectException) {
    + result.o2 = (NoSuchObjectException) e;
                              result.setO2IsSet(true);
                              msg = result;
                  }
    @@ -15974,25 +15808,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, alter_table_with_cascade_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    - iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade,resultHandler);
    + public void start(I iface, get_table_args args, org.apache.thrift.async.AsyncMethodCallback<Table> resultHandler) throws TException {
    + iface.get_table(args.dbname, args.tbl_name,resultHandler);
            }
          }

    - public static class add_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_args, Partition> {
    - public add_partition() {
    - super("add_partition");
    + public static class get_table_objects_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_objects_by_name_args, List<Table>> {
    + public get_table_objects_by_name() {
    + super("get_table_objects_by_name");
            }

    - public add_partition_args getEmptyArgsInstance() {
    - return new add_partition_args();
    + public get_table_objects_by_name_args getEmptyArgsInstance() {
    + return new get_table_objects_by_name_args();
            }

    - public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<List<Table>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Partition>() {
    - public void onComplete(Partition o) {
    - add_partition_result result = new add_partition_result();
    + return new AsyncMethodCallback<List<Table>>() {
    + public void onComplete(List<Table> o) {
    + get_table_objects_by_name_result result = new get_table_objects_by_name_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -16005,19 +15839,19 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - add_partition_result result = new add_partition_result();
    - if (e instanceof InvalidObjectException) {
    - result.o1 = (InvalidObjectException) e;
    + get_table_objects_by_name_result result = new get_table_objects_by_name_result();
    + if (e instanceof MetaException) {
    + result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof AlreadyExistsException) {
    - result.o2 = (AlreadyExistsException) e;
    + else if (e instanceof InvalidOperationException) {
    + result.o2 = (InvalidOperationException) e;
                              result.setO2IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof MetaException) {
    - result.o3 = (MetaException) e;
    + else if (e instanceof UnknownDBException) {
    + result.o3 = (UnknownDBException) e;
                              result.setO3IsSet(true);
                              msg = result;
                  }
    @@ -16041,25 +15875,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, add_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    - iface.add_partition(args.new_part,resultHandler);
    + public void start(I iface, get_table_objects_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<List<Table>> resultHandler) throws TException {
    + iface.get_table_objects_by_name(args.dbname, args.tbl_names,resultHandler);
            }
          }

    - public static class add_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_with_environment_context_args, Partition> {
    - public add_partition_with_environment_context() {
    - super("add_partition_with_environment_context");
    + public static class get_table_names_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_names_by_filter_args, List<String>> {
    + public get_table_names_by_filter() {
    + super("get_table_names_by_filter");
            }

    - public add_partition_with_environment_context_args getEmptyArgsInstance() {
    - return new add_partition_with_environment_context_args();
    + public get_table_names_by_filter_args getEmptyArgsInstance() {
    + return new get_table_names_by_filter_args();
            }

    - public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Partition>() {
    - public void onComplete(Partition o) {
    - add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
    + return new AsyncMethodCallback<List<String>>() {
    + public void onComplete(List<String> o) {
    + get_table_names_by_filter_result result = new get_table_names_by_filter_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -16072,19 +15906,19 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
    - if (e instanceof InvalidObjectException) {
    - result.o1 = (InvalidObjectException) e;
    + get_table_names_by_filter_result result = new get_table_names_by_filter_result();
    + if (e instanceof MetaException) {
    + result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof AlreadyExistsException) {
    - result.o2 = (AlreadyExistsException) e;
    + else if (e instanceof InvalidOperationException) {
    + result.o2 = (InvalidOperationException) e;
                              result.setO2IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof MetaException) {
    - result.o3 = (MetaException) e;
    + else if (e instanceof UnknownDBException) {
    + result.o3 = (UnknownDBException) e;
                              result.setO3IsSet(true);
                              msg = result;
                  }
    @@ -16108,27 +15942,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, add_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    - iface.add_partition_with_environment_context(args.new_part, args.environment_context,resultHandler);
    + public void start(I iface, get_table_names_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
    + iface.get_table_names_by_filter(args.dbname, args.filter, args.max_tables,resultHandler);
            }
          }

    - public static class add_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_args, Integer> {
    - public add_partitions() {
    - super("add_partitions");
    + public static class alter_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_args, Void> {
    + public alter_table() {
    + super("alter_table");
            }

    - public add_partitions_args getEmptyArgsInstance() {
    - return new add_partitions_args();
    + public alter_table_args getEmptyArgsInstance() {
    + return new alter_table_args();
            }

    - public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Integer>() {
    - public void onComplete(Integer o) {
    - add_partitions_result result = new add_partitions_result();
    - result.success = o;
    - result.setSuccessIsSet(true);
    + return new AsyncMethodCallback<Void>() {
    + public void onComplete(Void o) {
    + alter_table_result result = new alter_table_result();
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -16140,20 +15972,15 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - add_partitions_result result = new add_partitions_result();
    - if (e instanceof InvalidObjectException) {
    - result.o1 = (InvalidObjectException) e;
    + alter_table_result result = new alter_table_result();
    + if (e instanceof InvalidOperationException) {
    + result.o1 = (InvalidOperationException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof AlreadyExistsException) {
    - result.o2 = (AlreadyExistsException) e;
    - result.setO2IsSet(true);
    - msg = result;
    - }
                  else if (e instanceof MetaException) {
    - result.o3 = (MetaException) e;
    - result.setO3IsSet(true);
    + result.o2 = (MetaException) e;
    + result.setO2IsSet(true);
                              msg = result;
                  }
                   else
    @@ -16176,27 +16003,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, add_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
    - iface.add_partitions(args.new_parts,resultHandler);
    + public void start(I iface, alter_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    + iface.alter_table(args.dbname, args.tbl_name, args.new_tbl,resultHandler);
            }
          }

    - public static class add_partitions_pspec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_pspec_args, Integer> {
    - public add_partitions_pspec() {
    - super("add_partitions_pspec");
    + public static class alter_table_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_environment_context_args, Void> {
    + public alter_table_with_environment_context() {
    + super("alter_table_with_environment_context");
            }

    - public add_partitions_pspec_args getEmptyArgsInstance() {
    - return new add_partitions_pspec_args();
    + public alter_table_with_environment_context_args getEmptyArgsInstance() {
    + return new alter_table_with_environment_context_args();
            }

    - public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Integer>() {
    - public void onComplete(Integer o) {
    - add_partitions_pspec_result result = new add_partitions_pspec_result();
    - result.success = o;
    - result.setSuccessIsSet(true);
    + return new AsyncMethodCallback<Void>() {
    + public void onComplete(Void o) {
    + alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -16208,20 +16033,15 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - add_partitions_pspec_result result = new add_partitions_pspec_result();
    - if (e instanceof InvalidObjectException) {
    - result.o1 = (InvalidObjectException) e;
    + alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
    + if (e instanceof InvalidOperationException) {
    + result.o1 = (InvalidOperationException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof AlreadyExistsException) {
    - result.o2 = (AlreadyExistsException) e;
    - result.setO2IsSet(true);
    - msg = result;
    - }
                  else if (e instanceof MetaException) {
    - result.o3 = (MetaException) e;
    - result.setO3IsSet(true);
    + result.o2 = (MetaException) e;
    + result.setO2IsSet(true);
                              msg = result;
                  }
                   else
    @@ -16244,26 +16064,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, add_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
    - iface.add_partitions_pspec(args.new_parts,resultHandler);
    + public void start(I iface, alter_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    + iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context,resultHandler);
            }
          }

    - public static class append_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_args, Partition> {
    - public append_partition() {
    - super("append_partition");
    + public static class alter_table_with_cascade<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_cascade_args, Void> {
    + public alter_table_with_cascade() {
    + super("alter_table_with_cascade");
            }

    - public append_partition_args getEmptyArgsInstance() {
    - return new append_partition_args();
    + public alter_table_with_cascade_args getEmptyArgsInstance() {
    + return new alter_table_with_cascade_args();
            }

    - public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Partition>() {
    - public void onComplete(Partition o) {
    - append_partition_result result = new append_partition_result();
    - result.success = o;
    + return new AsyncMethodCallback<Void>() {
    + public void onComplete(Void o) {
    + alter_table_with_cascade_result result = new alter_table_with_cascade_result();
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -16275,20 +16094,15 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - append_partition_result result = new append_partition_result();
    - if (e instanceof InvalidObjectException) {
    - result.o1 = (InvalidObjectException) e;
    + alter_table_with_cascade_result result = new alter_table_with_cascade_result();
    + if (e instanceof InvalidOperationException) {
    + result.o1 = (InvalidOperationException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof AlreadyExistsException) {
    - result.o2 = (AlreadyExistsException) e;
    - result.setO2IsSet(true);
    - msg = result;
    - }
                  else if (e instanceof MetaException) {
    - result.o3 = (MetaException) e;
    - result.setO3IsSet(true);
    + result.o2 = (MetaException) e;
    + result.setO2IsSet(true);
                              msg = result;
                  }
                   else
    @@ -16311,25 +16125,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, append_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    - iface.append_partition(args.db_name, args.tbl_name, args.part_vals,resultHandler);
    + public void start(I iface, alter_table_with_cascade_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    + iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade,resultHandler);
            }
          }

    - public static class add_partitions_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_req_args, AddPartitionsResult> {
    - public add_partitions_req() {
    - super("add_partitions_req");
    + public static class add_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_args, Partition> {
    + public add_partition() {
    + super("add_partition");
            }

    - public add_partitions_req_args getEmptyArgsInstance() {
    - return new add_partitions_req_args();
    + public add_partition_args getEmptyArgsInstance() {
    + return new add_partition_args();
            }

    - public AsyncMethodCallback<AddPartitionsResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<AddPartitionsResult>() {
    - public void onComplete(AddPartitionsResult o) {
    - add_partitions_req_result result = new add_partitions_req_result();
    + return new AsyncMethodCallback<Partition>() {
    + public void onComplete(Partition o) {
    + add_partition_result result = new add_partition_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -16342,7 +16156,7 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - add_partitions_req_result result = new add_partitions_req_result();
    + add_partition_result result = new add_partition_result();
                  if (e instanceof InvalidObjectException) {
                              result.o1 = (InvalidObjectException) e;
                              result.setO1IsSet(true);
    @@ -16378,25 +16192,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, add_partitions_req_args args, org.apache.thrift.async.AsyncMethodCallback<AddPartitionsResult> resultHandler) throws TException {
    - iface.add_partitions_req(args.request,resultHandler);
    + public void start(I iface, add_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    + iface.add_partition(args.new_part,resultHandler);
            }
          }

    - public static class append_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_with_environment_context_args, Partition> {
    - public append_partition_with_environment_context() {
    - super("append_partition_with_environment_context");
    + public static class add_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_with_environment_context_args, Partition> {
    + public add_partition_with_environment_context() {
    + super("add_partition_with_environment_context");
            }

    - public append_partition_with_environment_context_args getEmptyArgsInstance() {
    - return new append_partition_with_environment_context_args();
    + public add_partition_with_environment_context_args getEmptyArgsInstance() {
    + return new add_partition_with_environment_context_args();
            }

            public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
              return new AsyncMethodCallback<Partition>() {
                public void onComplete(Partition o) {
    - append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
    + add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -16409,7 +16223,7 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
    + add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
                  if (e instanceof InvalidObjectException) {
                              result.o1 = (InvalidObjectException) e;
                              result.setO1IsSet(true);
    @@ -16445,26 +16259,27 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, append_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    - iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context,resultHandler);
    + public void start(I iface, add_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    + iface.add_partition_with_environment_context(args.new_part, args.environment_context,resultHandler);
            }
          }

    - public static class append_partition_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_args, Partition> {
    - public append_partition_by_name() {
    - super("append_partition_by_name");
    + public static class add_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_args, Integer> {
    + public add_partitions() {
    + super("add_partitions");
            }

    - public append_partition_by_name_args getEmptyArgsInstance() {
    - return new append_partition_by_name_args();
    + public add_partitions_args getEmptyArgsInstance() {
    + return new add_partitions_args();
            }

    - public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Partition>() {
    - public void onComplete(Partition o) {
    - append_partition_by_name_result result = new append_partition_by_name_result();
    + return new AsyncMethodCallback<Integer>() {
    + public void onComplete(Integer o) {
    + add_partitions_result result = new add_partitions_result();
                  result.success = o;
    + result.setSuccessIsSet(true);
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -16476,7 +16291,7 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - append_partition_by_name_result result = new append_partition_by_name_result();
    + add_partitions_result result = new add_partitions_result();
                  if (e instanceof InvalidObjectException) {
                              result.o1 = (InvalidObjectException) e;
                              result.setO1IsSet(true);
    @@ -16512,26 +16327,27 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, append_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    - iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler);
    + public void start(I iface, add_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
    + iface.add_partitions(args.new_parts,resultHandler);
            }
          }

    - public static class append_partition_by_name_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_with_environment_context_args, Partition> {
    - public append_partition_by_name_with_environment_context() {
    - super("append_partition_by_name_with_environment_context");
    + public static class add_partitions_pspec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_pspec_args, Integer> {
    + public add_partitions_pspec() {
    + super("add_partitions_pspec");
            }

    - public append_partition_by_name_with_environment_context_args getEmptyArgsInstance() {
    - return new append_partition_by_name_with_environment_context_args();
    + public add_partitions_pspec_args getEmptyArgsInstance() {
    + return new add_partitions_pspec_args();
            }

    - public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Partition>() {
    - public void onComplete(Partition o) {
    - append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
    + return new AsyncMethodCallback<Integer>() {
    + public void onComplete(Integer o) {
    + add_partitions_pspec_result result = new add_partitions_pspec_result();
                  result.success = o;
    + result.setSuccessIsSet(true);
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -16543,7 +16359,342 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
    + add_partitions_pspec_result result = new add_partitions_pspec_result();
    + if (e instanceof InvalidObjectException) {
    + result.o1 = (InvalidObjectException) e;
    + result.setO1IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof AlreadyExistsException) {
    + result.o2 = (AlreadyExistsException) e;
    + result.setO2IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof MetaException) {
    + result.o3 = (MetaException) e;
    + result.setO3IsSet(true);
    + msg = result;
    + }
    + else
    + {
    + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    + }
    + try {
    + fcall.sendResponse(fb,msg,msgType,seqid);
    + return;
    + } catch (Exception ex) {
    + LOGGER.error("Exception writing to internal frame buffer", ex);
    + }
    + fb.close();
    + }
    + };
    + }
    +
    + protected boolean isOneway() {
    + return false;
    + }
    +
    + public void start(I iface, add_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
    + iface.add_partitions_pspec(args.new_parts,resultHandler);
    + }
    + }
    +
    + public static class append_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_args, Partition> {
    + public append_partition() {
    + super("append_partition");
    + }
    +
    + public append_partition_args getEmptyArgsInstance() {
    + return new append_partition_args();
    + }
    +
    + public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + final org.apache.thrift.AsyncProcessFunction fcall = this;
    + return new AsyncMethodCallback<Partition>() {
    + public void onComplete(Partition o) {
    + append_partition_result result = new append_partition_result();
    + result.success = o;
    + try {
    + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    + return;
    + } catch (Exception e) {
    + LOGGER.error("Exception writing to internal frame buffer", e);
    + }
    + fb.close();
    + }
    + public void onError(Exception e) {
    + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    + org.apache.thrift.TBase msg;
    + append_partition_result result = new append_partition_result();
    + if (e instanceof InvalidObjectException) {
    + result.o1 = (InvalidObjectException) e;
    + result.setO1IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof AlreadyExistsException) {
    + result.o2 = (AlreadyExistsException) e;
    + result.setO2IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof MetaException) {
    + result.o3 = (MetaException) e;
    + result.setO3IsSet(true);
    + msg = result;
    + }
    + else
    + {
    + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    + }
    + try {
    + fcall.sendResponse(fb,msg,msgType,seqid);
    + return;
    + } catch (Exception ex) {
    + LOGGER.error("Exception writing to internal frame buffer", ex);
    + }
    + fb.close();
    + }
    + };
    + }
    +
    + protected boolean isOneway() {
    + return false;
    + }
    +
    + public void start(I iface, append_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    + iface.append_partition(args.db_name, args.tbl_name, args.part_vals,resultHandler);
    + }
    + }
    +
    + public static class add_partitions_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_req_args, AddPartitionsResult> {
    + public add_partitions_req() {
    + super("add_partitions_req");
    + }
    +
    + public add_partitions_req_args getEmptyArgsInstance() {
    + return new add_partitions_req_args();
    + }
    +
    + public AsyncMethodCallback<AddPartitionsResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + final org.apache.thrift.AsyncProcessFunction fcall = this;
    + return new AsyncMethodCallback<AddPartitionsResult>() {
    + public void onComplete(AddPartitionsResult o) {
    + add_partitions_req_result result = new add_partitions_req_result();
    + result.success = o;
    + try {
    + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    + return;
    + } catch (Exception e) {
    + LOGGER.error("Exception writing to internal frame buffer", e);
    + }
    + fb.close();
    + }
    + public void onError(Exception e) {
    + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    + org.apache.thrift.TBase msg;
    + add_partitions_req_result result = new add_partitions_req_result();
    + if (e instanceof InvalidObjectException) {
    + result.o1 = (InvalidObjectException) e;
    + result.setO1IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof AlreadyExistsException) {
    + result.o2 = (AlreadyExistsException) e;
    + result.setO2IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof MetaException) {
    + result.o3 = (MetaException) e;
    + result.setO3IsSet(true);
    + msg = result;
    + }
    + else
    + {
    + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    + }
    + try {
    + fcall.sendResponse(fb,msg,msgType,seqid);
    + return;
    + } catch (Exception ex) {
    + LOGGER.error("Exception writing to internal frame buffer", ex);
    + }
    + fb.close();
    + }
    + };
    + }
    +
    + protected boolean isOneway() {
    + return false;
    + }
    +
    + public void start(I iface, add_partitions_req_args args, org.apache.thrift.async.AsyncMethodCallback<AddPartitionsResult> resultHandler) throws TException {
    + iface.add_partitions_req(args.request,resultHandler);
    + }
    + }
    +
    + public static class append_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_with_environment_context_args, Partition> {
    + public append_partition_with_environment_context() {
    + super("append_partition_with_environment_context");
    + }
    +
    + public append_partition_with_environment_context_args getEmptyArgsInstance() {
    + return new append_partition_with_environment_context_args();
    + }
    +
    + public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + final org.apache.thrift.AsyncProcessFunction fcall = this;
    + return new AsyncMethodCallback<Partition>() {
    + public void onComplete(Partition o) {
    + append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
    + result.success = o;
    + try {
    + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    + return;
    + } catch (Exception e) {
    + LOGGER.error("Exception writing to internal frame buffer", e);
    + }
    + fb.close();
    + }
    + public void onError(Exception e) {
    + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    + org.apache.thrift.TBase msg;
    + append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
    + if (e instanceof InvalidObjectException) {
    + result.o1 = (InvalidObjectException) e;
    + result.setO1IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof AlreadyExistsException) {
    + result.o2 = (AlreadyExistsException) e;
    + result.setO2IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof MetaException) {
    + result.o3 = (MetaException) e;
    + result.setO3IsSet(true);
    + msg = result;
    + }
    + else
    + {
    + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    + }
    + try {
    + fcall.sendResponse(fb,msg,msgType,seqid);
    + return;
    + } catch (Exception ex) {
    + LOGGER.error("Exception writing to internal frame buffer", ex);
    + }
    + fb.close();
    + }
    + };
    + }
    +
    + protected boolean isOneway() {
    + return false;
    + }
    +
    + public void start(I iface, append_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    + iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context,resultHandler);
    + }
    + }
    +
    + public static class append_partition_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_args, Partition> {
    + public append_partition_by_name() {
    + super("append_partition_by_name");
    + }
    +
    + public append_partition_by_name_args getEmptyArgsInstance() {
    + return new append_partition_by_name_args();
    + }
    +
    + public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + final org.apache.thrift.AsyncProcessFunction fcall = this;
    + return new AsyncMethodCallback<Partition>() {
    + public void onComplete(Partition o) {
    + append_partition_by_name_result result = new append_partition_by_name_result();
    + result.success = o;
    + try {
    + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    + return;
    + } catch (Exception e) {
    + LOGGER.error("Exception writing to internal frame buffer", e);
    + }
    + fb.close();
    + }
    + public void onError(Exception e) {
    + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    + org.apache.thrift.TBase msg;
    + append_partition_by_name_result result = new append_partition_by_name_result();
    + if (e instanceof InvalidObjectException) {
    + result.o1 = (InvalidObjectException) e;
    + result.setO1IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof AlreadyExistsException) {
    + result.o2 = (AlreadyExistsException) e;
    + result.setO2IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof MetaException) {
    + result.o3 = (MetaException) e;
    + result.setO3IsSet(true);
    + msg = result;
    + }
    + else
    + {
    + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    + }
    + try {
    + fcall.sendResponse(fb,msg,msgType,seqid);
    + return;
    + } catch (Exception ex) {
    + LOGGER.error("Exception writing to internal frame buffer", ex);
    + }
    + fb.close();
    + }
    + };
    + }
    +
    + protected boolean isOneway() {
    + return false;
    + }
    +
    + public void start(I iface, append_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    + iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler);
    + }
    + }
    +
    + public static class append_partition_by_name_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_with_environment_context_args, Partition> {
    + public append_partition_by_name_with_environment_context() {
    + super("append_partition_by_name_with_environment_context");
    + }
    +
    + public append_partition_by_name_with_environment_context_args getEmptyArgsInstance() {
    + return new append_partition_by_name_with_environment_context_args();
    + }
    +
    + public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + final org.apache.thrift.AsyncProcessFunction fcall = this;
    + return new AsyncMethodCallback<Partition>() {
    + public void onComplete(Partition o) {
    + append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
    + result.success = o;
    + try {
    + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    + return;
    + } catch (Exception e) {
    + LOGGER.error("Exception writing to internal frame buffer", e);
    + }
    + fb.close();
    + }
    + public void onError(Exception e) {
    + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    + org.apache.thrift.TBase msg;
    + append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
                  if (e instanceof InvalidObjectException) {
                              result.o1 = (InvalidObjectException) e;
                              result.setO1IsSet(true);
    @@ -42513,6 +42664,835 @@ public class ThriftHiveMetastore {

        }

    + public static class drop_constraint_args implements org.apache.thrift.TBase<drop_constraint_args, drop_constraint_args._Fields>, java.io.Serializable, Cloneable, Comparable<drop_constraint_args> {
    + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_constraint_args");
    +
    + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1);
    +
    + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
    + static {
    + schemes.put(StandardScheme.class, new drop_constraint_argsStandardSchemeFactory());
    + schemes.put(TupleScheme.class, new drop_constraint_argsTupleSchemeFactory());
    + }
    +
    + private DropConstraintRequest req; // required
    +
    + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
    + public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    + REQ((short)1, "req");
    +
    + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
    +
    + static {
    + for (_Fields field : EnumSet.allOf(_Fields.class)) {
    + byName.put(field.getFieldName(), field);
    + }
    + }
    +
    + /**
    + * Find the _Fields constant that matches fieldId, or null if its not found.
    + */
    + public static _Fields findByThriftId(int fieldId) {
    + switch(fieldId) {
    + case 1: // REQ
    + return REQ;
    + default:
    + return null;
    + }
    + }
    +
    + /**
    + * Find the _Fields constant that matches fieldId, throwing an exception
    + * if it is not found.
    + */
    + public static _Fields findByThriftIdOrThrow(int fieldId) {
    + _Fields fields = findByThriftId(fieldId);
    + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
    + return fields;
    + }
    +
    + /**
    + * Find the _Fields constant that matches name, or null if its not found.
    + */
    + public static _Fields findByName(String name) {
    + return byName.get(name);
    + }
    +
    + private final short _thriftId;
    + private final String _fieldName;
    +
    + _Fields(short thriftId, String fieldName) {
    + _thriftId = thriftId;
    + _fieldName = fieldName;
    + }
    +
    + public short getThriftFieldId() {
    + return _thriftId;
    + }
    +
    + public String getFieldName() {
    + return _fieldName;
    + }
    + }
    +
    + // isset id assignments
    + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
    + static {
    + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT,
    + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DropConstraintRequest.class)));
    + metaDataMap = Collections.unmodifiableMap(tmpMap);
    + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_constraint_args.class, metaDataMap);
    + }
    +
    + public drop_constraint_args() {
    + }
    +
    + public drop_constraint_args(
    + DropConstraintRequest req)
    + {
    + this();
    + this.req = req;
    + }
    +
    + /**
    + * Performs a deep copy on <i>other</i>.
    + */
    + public drop_constraint_args(drop_constraint_args other) {
    + if (other.isSetReq()) {
    + this.req = new DropConstraintRequest(other.req);
    + }
    + }
    +
    + public drop_constraint_args deepCopy() {
    + return new drop_constraint_args(this);
    + }
    +
    + @Override
    + public void clear() {
    + this.req = null;
    + }
    +
    + public DropConstraintRequest getReq() {
    + return this.req;
    + }
    +
    + public void setReq(DropConstraintRequest req) {
    + this.req = req;
    + }
    +
    + public void unsetReq() {
    + this.req = null;
    + }
    +
    + /** Returns true if field req is set (has been assigned a value) and false otherwise */
    + public boolean isSetReq() {
    + return this.req != null;
    + }
    +
    + public void setReqIsSet(boolean value) {
    + if (!value) {
    + this.req = null;
    + }
    + }
    +
    + public void setFieldValue(_Fields field, Object value) {
    + switch (field) {
    + case REQ:
    + if (value == null) {
    + unsetReq();
    + } else {
    + setReq((DropConstraintRequest)value);
    + }
    + break;
    +
    + }
    + }
    +
    + public Object getFieldValue(_Fields field) {
    + switch (field) {
    + case REQ:
    + return getReq();
    +
    + }
    + throw new IllegalStateException();
    + }
    +
    + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
    + public boolean isSet(_Fields field) {
    + if (field == null) {
    + throw new IllegalArgumentException();
    + }
    +
    + switch (field) {
    + case REQ:
    + return isSetReq();
    + }
    + throw new IllegalStateException();
    + }
    +
    + @Override
    + public boolean equals(Object that) {
    + if (that == null)
    + return false;
    + if (that instanceof drop_constraint_args)
    + return this.equals((drop_constraint_args)that);
    + return false;
    + }
    +
    + public boolean equals(drop_constraint_args that) {
    + if (that == null)
    + return false;
    +
    + boolean this_present_req = true && this.isSetReq();
    + boolean that_present_req = true && that.isSetReq();
    + if (this_present_req || that_present_req) {
    + if (!(this_present_req && that_present_req))
    + return false;
    + if (!this.req.equals(that.req))
    + return false;
    + }
    +
    + return true;
    + }
    +
    + @Override
    + public int hashCode() {
    + List<Object> list = new ArrayList<Object>();
    +
    + boolean present_req = true && (isSetReq());
    + list.add(present_req);
    + if (present_req)
    + list.add(req);
    +
    + return list.hashCode();
    + }
    +
    + @Override
    + public int compareTo(drop_constraint_args other) {
    + if (!getCl

    <TRUNCATED>
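    [Editor's note] The regenerated async handlers in the diff above all share one template: getEmptyArgsInstance() supplies the request struct, getResultHandler() builds an AsyncMethodCallback whose onComplete() copies the value into the method's *_result struct (also calling setSuccessIsSet(true) for primitive results such as the Integer returned by add_partitions), and whose onError() maps each exception declared in the IDL onto the matching result field before falling back to a TApplicationException, while start() unpacks the args struct and calls the async service interface. Below is a condensed sketch of that shape for get_all_tables, taken from the + lines of the hunk above; it assumes the enclosing generated ThriftHiveMetastore scope (the get_all_tables_args/get_all_tables_result structs, MetaException, AsyncIface, LOGGER), so it is a reading aid for the pattern rather than standalone compilable code.

      public static class get_all_tables<I extends AsyncIface>
          extends org.apache.thrift.AsyncProcessFunction<I, get_all_tables_args, List<String>> {

        public get_all_tables() { super("get_all_tables"); }

        // Empty request struct; Thrift deserializes the wire payload into it before start() runs.
        public get_all_tables_args getEmptyArgsInstance() { return new get_all_tables_args(); }

        public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
          final org.apache.thrift.AsyncProcessFunction fcall = this;
          return new AsyncMethodCallback<List<String>>() {
            public void onComplete(List<String> o) {
              // Success path: wrap the returned value in the generated result struct and reply.
              get_all_tables_result result = new get_all_tables_result();
              result.success = o;
              try {
                fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                return;
              } catch (Exception e) {
                LOGGER.error("Exception writing to internal frame buffer", e);
              }
              fb.close();
            }
            public void onError(Exception e) {
              // Exceptions declared in the IDL become result fields; anything else is an application exception.
              byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
              org.apache.thrift.TBase msg;
              get_all_tables_result result = new get_all_tables_result();
              if (e instanceof MetaException) {
                result.o1 = (MetaException) e;
                result.setO1IsSet(true);
                msg = result;
              } else {
                msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
                msg = new org.apache.thrift.TApplicationException(
                    org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
              }
              try {
                fcall.sendResponse(fb, msg, msgType, seqid);
                return;
              } catch (Exception ex) {
                LOGGER.error("Exception writing to internal frame buffer", ex);
              }
              fb.close();
            }
          };
        }

        protected boolean isOneway() { return false; }

        // Dispatch: unpack the args struct and invoke the async service implementation.
        public void start(I iface, get_all_tables_args args,
            org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
          iface.get_all_tables(args.db_name, resultHandler);
        }
      }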
  • Spena at May 6, 2016 at 8:42 pm
    Merge branch 'master' into llap

    Conflicts:
      llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
      llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
      llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e0579097
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e0579097
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e0579097

    Branch: refs/heads/java8
    Commit: e057909732b40b581fcad3f61fb798600f01ecdf
    Parents: 4847f65 8729966
    Author: Jason Dere <jdere@hortonworks.com>
    Authored: Wed May 4 00:17:12 2016 -0700
    Committer: Jason Dere <jdere@hortonworks.com>
    Committed: Wed May 4 00:17:12 2016 -0700

    ----------------------------------------------------------------------
      HIVE-13509.2.patch | 478 --
      .../ext/LlapTaskUmbilicalExternalClient.java | 18 +-
      .../daemon/rpc/LlapDaemonProtocolProtos.java | 7000 +++++++++++-------
      .../org/apache/hadoop/hive/llap/DaemonId.java | 41 +
      .../hive/llap/security/LlapTokenIdentifier.java | 39 +-
      .../hive/llap/security/LlapTokenProvider.java | 2 +-
      .../apache/hadoop/hive/llap/tez/Converters.java | 84 +-
      .../src/protobuf/LlapDaemonProtocol.proto | 70 +-
      .../hadoop/hive/llap/tez/TestConverters.java | 51 +-
      .../hadoop/hive/llap/LlapBaseInputFormat.java | 32 +-
      .../hive/llap/daemon/ContainerRunner.java | 9 +-
      .../llap/daemon/impl/ContainerRunnerImpl.java | 135 +-
      .../hive/llap/daemon/impl/LlapDaemon.java | 52 +-
      .../daemon/impl/LlapProtocolServerImpl.java | 41 +-
      .../hive/llap/daemon/impl/LlapTokenChecker.java | 137 +
      .../llap/daemon/impl/QueryFragmentInfo.java | 23 +-
      .../hadoop/hive/llap/daemon/impl/QueryInfo.java | 26 +-
      .../hive/llap/daemon/impl/QueryTracker.java | 97 +-
      .../hadoop/hive/llap/daemon/impl/Scheduler.java | 2 +
      .../llap/daemon/impl/TaskExecutorService.java | 17 +-
      .../llap/daemon/impl/TaskRunnerCallable.java | 77 +-
      .../hive/llap/security/LlapSecurityHelper.java | 15 +-
      .../hive/llap/security/SecretManager.java | 19 +-
      .../hive/llap/daemon/MiniLlapCluster.java | 2 +-
      .../daemon/impl/TaskExecutorTestHelpers.java | 44 +-
      .../impl/TestLlapDaemonProtocolServerImpl.java | 2 +-
      .../llap/daemon/impl/TestLlapTokenChecker.java | 96 +
      .../TestFirstInFirstOutComparator.java | 27 +-
      .../llap/tezplugins/LlapTaskCommunicator.java | 31 +-
      .../hadoop/hive/metastore/txn/TxnHandler.java | 5 +
      .../hive/ql/exec/tez/TezSessionState.java | 3 +-
      .../hadoop/hive/ql/lockmgr/DbTxnManager.java | 12 +
      .../hadoop/hive/ql/parse/SemanticAnalyzer.java | 1 -
      .../ql/parse/UpdateDeleteSemanticAnalyzer.java | 16 +-
      .../apache/hadoop/hive/ql/TestTxnCommands2.java | 22 +
      .../hive/ql/lockmgr/TestDbTxnManager2.java | 114 +
      .../dynpart_sort_optimization_acid.q.out | 120 +-
      37 files changed, 5479 insertions(+), 3481 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/e0579097/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
    ----------------------------------------------------------------------
    diff --cc llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
    index fe2fd7c,0000000..6e2c85d
    mode 100644,000000..100644
    --- a/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
    +++ b/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
    @@@ -1,413 -1,0 +1,421 @@@
      +package org.apache.hadoop.hive.llap.ext;
      +
      +import java.io.IOException;
      +import java.net.InetSocketAddress;
      +import java.util.ArrayList;
      +import java.util.Collections;
      +import java.util.List;
      +import java.util.concurrent.atomic.AtomicLong;
      +import java.util.concurrent.ConcurrentHashMap;
      +import java.util.concurrent.ConcurrentMap;
      +import java.util.concurrent.ScheduledThreadPoolExecutor;
      +import java.util.concurrent.TimeUnit;
      +
      +import com.google.common.base.Preconditions;
      +import org.apache.commons.collections4.ListUtils;
      +import org.apache.hadoop.conf.Configuration;
      +import org.apache.hadoop.hive.conf.HiveConf;
      +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
    ++import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
    ++import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexIdentifier;
      +import org.apache.hadoop.hive.llap.protocol.LlapTaskUmbilicalProtocol;
    ++import org.apache.hadoop.hive.llap.tez.Converters;
      +import org.apache.hadoop.hive.llap.tez.LlapProtocolClientProxy;
      +import org.apache.hadoop.hive.llap.tezplugins.helpers.LlapTaskUmbilicalServer;
      +import org.apache.hadoop.io.Text;
      +import org.apache.hadoop.ipc.ProtocolSignature;
      +import org.apache.hadoop.security.token.Token;
      +import org.apache.hadoop.service.AbstractService;
      +import org.apache.hadoop.yarn.api.records.ContainerId;
      +import org.apache.hadoop.yarn.util.ConverterUtils;
      +import org.apache.tez.common.security.JobTokenIdentifier;
      +import org.apache.tez.dag.api.TezException;
      +import org.apache.tez.dag.records.TezTaskAttemptID;
      +import org.apache.tez.runtime.api.Event;
      +import org.apache.tez.runtime.api.impl.EventType;
      +import org.apache.tez.runtime.api.impl.TezEvent;
      +import org.apache.tez.runtime.api.impl.TezHeartbeatRequest;
      +import org.apache.tez.runtime.api.impl.TezHeartbeatResponse;
      +import org.slf4j.Logger;
      +import org.slf4j.LoggerFactory;
      +
      +
      +public class LlapTaskUmbilicalExternalClient extends AbstractService {
      +
      + private static final Logger LOG = LoggerFactory.getLogger(LlapTaskUmbilicalExternalClient.class);
      +
      + private final LlapProtocolClientProxy communicator;
      + private volatile LlapTaskUmbilicalServer llapTaskUmbilicalServer;
      + private final Configuration conf;
      + private final LlapTaskUmbilicalProtocol umbilical;
      +
      + protected final String tokenIdentifier;
      + protected final Token<JobTokenIdentifier> sessionToken;
      +
      + private final ConcurrentMap<String, PendingEventData> pendingEvents = new ConcurrentHashMap<>();
      + private final ConcurrentMap<String, TaskHeartbeatInfo> registeredTasks= new ConcurrentHashMap<String, TaskHeartbeatInfo>();
      + private LlapTaskUmbilicalExternalResponder responder = null;
      + private final ScheduledThreadPoolExecutor timer;
      + private final long connectionTimeout;
      +
      + private static class TaskHeartbeatInfo {
      + final String taskAttemptId;
      + final String hostname;
      + final int port;
      + final AtomicLong lastHeartbeat = new AtomicLong();
      +
      + public TaskHeartbeatInfo(String taskAttemptId, String hostname, int port) {
      + this.taskAttemptId = taskAttemptId;
      + this.hostname = hostname;
      + this.port = port;
      + this.lastHeartbeat.set(System.currentTimeMillis());
      + }
      + }
      +
      + private static class PendingEventData {
      + final TaskHeartbeatInfo heartbeatInfo;
      + final List<TezEvent> tezEvents;
      +
      + public PendingEventData(TaskHeartbeatInfo heartbeatInfo, List<TezEvent> tezEvents) {
      + this.heartbeatInfo = heartbeatInfo;
      + this.tezEvents = tezEvents;
      + }
      + }
      +
      + // TODO KKK Work out the details of the tokenIdentifier, and the session token.
      + // It may just be possible to create one here - since Shuffle is not involved, and this is only used
      + // for communication from LLAP-Daemons to the server. It will need to be sent in as part
      + // of the job submission request.
      + public LlapTaskUmbilicalExternalClient(Configuration conf, String tokenIdentifier,
      + Token<JobTokenIdentifier> sessionToken, LlapTaskUmbilicalExternalResponder responder) {
      + super(LlapTaskUmbilicalExternalClient.class.getName());
      + this.conf = conf;
      + this.umbilical = new LlapTaskUmbilicalExternalImpl();
      + this.tokenIdentifier = tokenIdentifier;
      + this.sessionToken = sessionToken;
      + this.responder = responder;
      + this.timer = new ScheduledThreadPoolExecutor(1);
      + this.connectionTimeout = HiveConf.getTimeVar(conf,
      + HiveConf.ConfVars.LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS, TimeUnit.MILLISECONDS);
      + // TODO. No support for the LLAP token yet. Add support for configurable threads, however 1 should always be enough.
      + this.communicator = new LlapProtocolClientProxy(1, conf, null);
      + this.communicator.init(conf);
      + }
      +
      + @Override
      + public void serviceStart() throws IOException {
      + // If we use a single server for multiple external clients, then consider using more than one handler.
      + int numHandlers = 1;
      + llapTaskUmbilicalServer = new LlapTaskUmbilicalServer(conf, umbilical, numHandlers, tokenIdentifier, sessionToken);
      + communicator.start();
      + }
      +
      + @Override
      + public void serviceStop() {
      + llapTaskUmbilicalServer.shutdownServer();
      + timer.shutdown();
      + if (this.communicator != null) {
      + this.communicator.stop();
      + }
      + }
      +
      + public InetSocketAddress getAddress() {
      + return llapTaskUmbilicalServer.getAddress();
      + }
      +
      +
      + /**
      + * Submit the work for actual execution.
      + * @param submitWorkRequestProto
      + */
      + public void submitWork(final SubmitWorkRequestProto submitWorkRequestProto, String llapHost, int llapPort, List<TezEvent> tezEvents) {
      + // Register the pending events to be sent for this spec.
    - String fragmentId = submitWorkRequestProto.getFragmentSpec().getFragmentIdentifierString();
    ++ SignableVertexSpec vertex = submitWorkRequestProto.getWorkSpec().getVertex();
    ++ VertexIdentifier vId = vertex.getVertexIdentifier();
    ++ TezTaskAttemptID attemptId = Converters.createTaskAttemptId(
    ++ vId, submitWorkRequestProto.getFragmentNumber(), submitWorkRequestProto.getAttemptNumber());
    ++ final String fragmentId = attemptId.toString();
    ++
      + PendingEventData pendingEventData = new PendingEventData(
      + new TaskHeartbeatInfo(fragmentId, llapHost, llapPort),
      + tezEvents);
      + pendingEvents.putIfAbsent(fragmentId, pendingEventData);
      +
      + // Set up a timer task to check for heartbeat timeouts
      + timer.scheduleAtFixedRate(new HeartbeatCheckTask(),
      + connectionTimeout, connectionTimeout, TimeUnit.MILLISECONDS);
      +
      + // Send out the actual SubmitWorkRequest
      + communicator.sendSubmitWork(submitWorkRequestProto, llapHost, llapPort,
      + new LlapProtocolClientProxy.ExecuteRequestCallback<LlapDaemonProtocolProtos.SubmitWorkResponseProto>() {
      +
      + @Override
      + public void setResponse(LlapDaemonProtocolProtos.SubmitWorkResponseProto response) {
      + if (response.hasSubmissionState()) {
      + if (response.getSubmissionState().equals(LlapDaemonProtocolProtos.SubmissionStateProto.REJECTED)) {
    - String msg = "Fragment: " + submitWorkRequestProto.getFragmentSpec().getFragmentIdentifierString() + " rejected. Server Busy.";
    ++ String msg = "Fragment: " + fragmentId + " rejected. Server Busy.";
      + LOG.info(msg);
      + if (responder != null) {
      + Throwable err = new RuntimeException(msg);
    - responder.submissionFailed(submitWorkRequestProto.getFragmentSpec().getFragmentIdentifierString(), err);
    ++ responder.submissionFailed(fragmentId, err);
      + }
      + return;
      + }
      + }
      + }
      +
      + @Override
      + public void indicateError(Throwable t) {
    - String msg = "Failed to submit: " + submitWorkRequestProto.getFragmentSpec().getFragmentIdentifierString();
    ++ String msg = "Failed to submit: " + fragmentId;
      + LOG.error(msg, t);
      + Throwable err = new RuntimeException(msg, t);
    - responder.submissionFailed(submitWorkRequestProto.getFragmentSpec().getFragmentIdentifierString(), err);
    ++ responder.submissionFailed(fragmentId, err);
      + }
      + });
      +
      +
      +
      +
      +
      +// // TODO Also send out information saying that the fragment is finishable - if that is not already included in the main fragment.
      +// // This entire call is only required if we're doing more than scans. MRInput has no dependencies and is always finishable
      +// QueryIdentifierProto queryIdentifier = QueryIdentifierProto
      +// .newBuilder()
      +// .setAppIdentifier(submitWorkRequestProto.getApplicationIdString()).setDagIdentifier(submitWorkRequestProto.getFragmentSpec().getDagId())
      +// .build();
      +// LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto sourceStateUpdatedRequest =
      +// LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.newBuilder().setQueryIdentifier(queryIdentifier).setState(
      +// LlapDaemonProtocolProtos.SourceStateProto.S_SUCCEEDED).
      +// setSrcName(TODO)
      +// communicator.sendSourceStateUpdate(LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.newBuilder().setQueryIdentifier(submitWorkRequestProto.getFragmentSpec().getFragmentIdentifierString()).set);
      +
      +
      + }
      +
      + private void updateHeartbeatInfo(String taskAttemptId) {
      + int updateCount = 0;
      +
      + PendingEventData pendingEventData = pendingEvents.get(taskAttemptId);
      + if (pendingEventData != null) {
      + pendingEventData.heartbeatInfo.lastHeartbeat.set(System.currentTimeMillis());
      + updateCount++;
      + }
      +
      + TaskHeartbeatInfo heartbeatInfo = registeredTasks.get(taskAttemptId);
      + if (heartbeatInfo != null) {
      + heartbeatInfo.lastHeartbeat.set(System.currentTimeMillis());
      + updateCount++;
      + }
      +
      + if (updateCount == 0) {
      + LOG.warn("No tasks found for heartbeat from taskAttemptId " + taskAttemptId);
      + }
      + }
      +
      + private void updateHeartbeatInfo(String hostname, int port) {
      + int updateCount = 0;
      +
      + for (String key : pendingEvents.keySet()) {
      + PendingEventData pendingEventData = pendingEvents.get(key);
      + if (pendingEventData != null) {
      + if (pendingEventData.heartbeatInfo.hostname.equals(hostname)
      + && pendingEventData.heartbeatInfo.port == port) {
      + pendingEventData.heartbeatInfo.lastHeartbeat.set(System.currentTimeMillis());
      + updateCount++;
      + }
      + }
      + }
      +
      + for (String key : registeredTasks.keySet()) {
      + TaskHeartbeatInfo heartbeatInfo = registeredTasks.get(key);
      + if (heartbeatInfo != null) {
      + if (heartbeatInfo.hostname.equals(hostname)
      + && heartbeatInfo.port == port) {
      + heartbeatInfo.lastHeartbeat.set(System.currentTimeMillis());
      + updateCount++;
      + }
      + }
      + }
      +
      + if (updateCount == 0) {
      + LOG.info("No tasks found for heartbeat from hostname " + hostname + ", port " + port);
      + }
      + }
      +
      + private class HeartbeatCheckTask implements Runnable {
      + public void run() {
      + long currentTime = System.currentTimeMillis();
      + List<String> timedOutTasks = new ArrayList<String>();
      +
      + // Check both pending and registered tasks for timeouts
      + for (String key : pendingEvents.keySet()) {
      + PendingEventData pendingEventData = pendingEvents.get(key);
      + if (pendingEventData != null) {
      + if (currentTime - pendingEventData.heartbeatInfo.lastHeartbeat.get() >= connectionTimeout) {
      + timedOutTasks.add(key);
      + }
      + }
      + }
      + for (String timedOutTask : timedOutTasks) {
      + LOG.info("Pending taskAttemptId " + timedOutTask + " timed out");
      + responder.heartbeatTimeout(timedOutTask);
      + pendingEvents.remove(timedOutTask);
      + // TODO: Do we need to tell the LLAP daemon we are no longer interested in this task?
      + }
      +
      + timedOutTasks.clear();
      + for (String key : registeredTasks.keySet()) {
      + TaskHeartbeatInfo heartbeatInfo = registeredTasks.get(key);
      + if (heartbeatInfo != null) {
      + if (currentTime - heartbeatInfo.lastHeartbeat.get() >= connectionTimeout) {
      + timedOutTasks.add(key);
      + }
      + }
      + }
      + for (String timedOutTask : timedOutTasks) {
      + LOG.info("Running taskAttemptId " + timedOutTask + " timed out");
      + responder.heartbeatTimeout(timedOutTask);
      + registeredTasks.remove(timedOutTask);
      + // TODO: Do we need to tell the LLAP daemon we are no longer interested in this task?
      + }
      + }
      + }
      +
      + public interface LlapTaskUmbilicalExternalResponder {
      + void submissionFailed(String fragmentId, Throwable throwable);
      + void heartbeat(TezHeartbeatRequest request);
      + void taskKilled(TezTaskAttemptID taskAttemptId);
      + void heartbeatTimeout(String fragmentId);
      + }
      +
      +
      +
      + // TODO Ideally, the server should be shared across all client sessions running on the same node.
      + private class LlapTaskUmbilicalExternalImpl implements LlapTaskUmbilicalProtocol {
      +
      + @Override
      + public boolean canCommit(TezTaskAttemptID taskid) throws IOException {
      + // Expecting only a single instance of a task to be running.
      + return true;
      + }
      +
      + @Override
      + public TezHeartbeatResponse heartbeat(TezHeartbeatRequest request) throws IOException,
      + TezException {
      + // Keep-alive information. The client should be informed and will have to take care of re-submitting the work.
      + // Some parts of fault tolerance go here.
      +
      + // This also provides completion information, and a possible notification when task actually starts running (first heartbeat)
      +
      + if (LOG.isDebugEnabled()) {
      + LOG.debug("Received heartbeat from container, request=" + request);
      + }
      +
      + // Incoming events can be ignored until the point when shuffle needs to be handled, instead of just scans.
      + TezHeartbeatResponse response = new TezHeartbeatResponse();
      +
      + response.setLastRequestId(request.getRequestId());
      + // Assuming TaskAttemptId and FragmentIdentifierString are the same. Verify this.
      + TezTaskAttemptID taskAttemptId = request.getCurrentTaskAttemptID();
      + String taskAttemptIdString = taskAttemptId.toString();
      +
      + updateHeartbeatInfo(taskAttemptIdString);
      +
      + List<TezEvent> tezEvents = null;
      + PendingEventData pendingEventData = pendingEvents.remove(taskAttemptIdString);
      + if (pendingEventData == null) {
      + tezEvents = Collections.emptyList();
      +
      + // If this heartbeat was not from a pending event and it's not in our list of registered tasks, tell the task to die.
      + if (!registeredTasks.containsKey(taskAttemptIdString)) {
      + LOG.info("Unexpected heartbeat from " + taskAttemptIdString);
      + response.setShouldDie(); // Do any of the other fields need to be set?
      + return response;
      + }
      + } else {
      + tezEvents = pendingEventData.tezEvents;
      + // Tasks removed from the pending list should then be added to the registered list.
      + registeredTasks.put(taskAttemptIdString, pendingEventData.heartbeatInfo);
      + }
      +
      + response.setLastRequestId(request.getRequestId());
      + // Irrelevant for event IDs. This can be tracked in the AM itself, instead of polluting the task.
      + // Also since we have all the MRInput events here - they'll all be sent in together.
      + response.setNextFromEventId(0); // Irrelevant. See comment above.
      + response.setNextPreRoutedEventId(0); //Irrelevant. See comment above.
      + response.setEvents(tezEvents);
      +
      + List<TezEvent> inEvents = request.getEvents();
      + if (LOG.isDebugEnabled()) {
      + LOG.debug("Heartbeat from " + taskAttemptIdString +
      + " events: " + (inEvents != null ? inEvents.size() : -1));
      + }
      + for (TezEvent tezEvent : ListUtils.emptyIfNull(inEvents)) {
      + EventType eventType = tezEvent.getEventType();
      + switch (eventType) {
      + case TASK_ATTEMPT_COMPLETED_EVENT:
      + LOG.debug("Task completed event for " + taskAttemptIdString);
      + registeredTasks.remove(taskAttemptIdString);
      + break;
      + case TASK_ATTEMPT_FAILED_EVENT:
      + LOG.debug("Task failed event for " + taskAttemptIdString);
      + registeredTasks.remove(taskAttemptIdString);
      + break;
      + case TASK_STATUS_UPDATE_EVENT:
      + // If we want to handle counters
      + LOG.debug("Task update event for " + taskAttemptIdString);
      + break;
      + default:
      + LOG.warn("Unhandled event type " + eventType);
      + break;
      + }
      + }
      +
      + // Pass the request on to the responder
      + try {
      + if (responder != null) {
      + responder.heartbeat(request);
      + }
      + } catch (Exception err) {
      + LOG.error("Error during responder execution", err);
      + }
      +
      + return response;
      + }
      +
      + @Override
      + public void nodeHeartbeat(Text hostname, int port) throws IOException {
      + updateHeartbeatInfo(hostname.toString(), port);
      + // No need to propagate this to the responder
      + }
      +
      + @Override
      + public void taskKilled(TezTaskAttemptID taskAttemptId) throws IOException {
      + String taskAttemptIdString = taskAttemptId.toString();
      + LOG.error("Task killed - " + taskAttemptIdString);
      + registeredTasks.remove(taskAttemptIdString);
      +
      + try {
      + if (responder != null) {
      + responder.taskKilled(taskAttemptId);
      + }
      + } catch (Exception err) {
      + LOG.error("Error during responder execution", err);
      + }
      + }
      +
      + @Override
      + public long getProtocolVersion(String protocol, long clientVersion) throws IOException {
      + return 0;
      + }
      +
      + @Override
      + public ProtocolSignature getProtocolSignature(String protocol, long clientVersion,
      + int clientMethodsHash) throws IOException {
      + return ProtocolSignature.getProtocolSignature(this, protocol,
      + clientVersion, clientMethodsHash);
      + }
      + }
      +
      +}
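
    For orientation, a minimal usage sketch of this client, mirroring the getRecordReader() flow in LlapBaseInputFormat further below. The request, token and daemon host/port are assumed to be supplied by the caller; the surrounding class name is hypothetical and is not part of the commit.

      import java.util.List;

      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
      import org.apache.hadoop.hive.llap.ext.LlapTaskUmbilicalExternalClient;
      import org.apache.hadoop.hive.llap.ext.LlapTaskUmbilicalExternalClient.LlapTaskUmbilicalExternalResponder;
      import org.apache.hadoop.security.token.Token;
      import org.apache.tez.common.security.JobTokenIdentifier;
      import org.apache.tez.dag.records.TezTaskAttemptID;
      import org.apache.tez.runtime.api.impl.TezEvent;
      import org.apache.tez.runtime.api.impl.TezHeartbeatRequest;

      public class UmbilicalClientSketch {
        public void run(Configuration conf, String tokenIdentifier,
            Token<JobTokenIdentifier> sessionToken, SubmitWorkRequestProto request,
            String llapHost, int llapPort, List<TezEvent> events) {
          // The responder callbacks are the only channel through which failures,
          // heartbeats and timeouts reach the caller.
          LlapTaskUmbilicalExternalResponder responder = new LlapTaskUmbilicalExternalResponder() {
            @Override public void submissionFailed(String fragmentId, Throwable t) { /* surface the error */ }
            @Override public void heartbeat(TezHeartbeatRequest hb) { /* track progress/completion events */ }
            @Override public void taskKilled(TezTaskAttemptID attemptId) { /* abort the read */ }
            @Override public void heartbeatTimeout(String fragmentId) { /* treat the fragment as lost */ }
          };
          LlapTaskUmbilicalExternalClient client =
              new LlapTaskUmbilicalExternalClient(conf, tokenIdentifier, sessionToken, responder);
          client.init(conf);  // AbstractService lifecycle
          client.start();     // starts the umbilical RPC server and the LlapProtocolClientProxy
          // client.getAddress() is the AM host/port the daemon heartbeats back to; it is
          // normally baked into the SubmitWorkRequestProto before this call.
          client.submitWork(request, llapHost, llapPort, events);
        }
      }

    A real consumer would translate the responder callbacks into reader events, as LlapRecordReaderTaskUmbilicalExternalResponder does in LlapBaseInputFormat below.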

    http://git-wip-us.apache.org/repos/asf/hive/blob/e0579097/llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapBaseInputFormat.java
    ----------------------------------------------------------------------
    diff --cc llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapBaseInputFormat.java
    index 8db2f88,0000000..988002f
    mode 100644,000000..100644
    --- a/llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapBaseInputFormat.java
    +++ b/llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapBaseInputFormat.java
    @@@ -1,476 -1,0 +1,480 @@@
      +/*
      + * Licensed to the Apache Software Foundation (ASF) under one or more
      + * contributor license agreements. See the NOTICE file distributed with
      + * this work for additional information regarding copyright ownership.
      + * The ASF licenses this file to You under the Apache License, Version 2.0
      + * (the "License"); you may not use this file except in compliance with
      + * the License. You may obtain a copy of the License at
      + *
      + * http://www.apache.org/licenses/LICENSE-2.0
      + *
      + * Unless required by applicable law or agreed to in writing, software
      + * distributed under the License is distributed on an "AS IS" BASIS,
      + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      + * See the License for the specific language governing permissions and
      + * limitations under the License.
      + */
      +package org.apache.hadoop.hive.llap;
      +
      +import java.util.ArrayList;
      +import java.util.List;
      +import java.util.Set;
      +import java.util.concurrent.LinkedBlockingQueue;
      +
      +import java.sql.SQLException;
      +import java.sql.Connection;
      +import java.sql.ResultSet;
      +import java.sql.Statement;
      +import java.sql.DriverManager;
      +
      +import java.io.IOException;
      +import java.io.DataInput;
      +import java.io.DataOutput;
      +import java.io.DataInputStream;
      +import java.io.ByteArrayInputStream;
      +import java.net.InetAddress;
      +import java.net.InetSocketAddress;
      +import java.net.Socket;
      +import java.nio.ByteBuffer;
      +
      +import org.apache.commons.collections4.ListUtils;
      +
      +import org.apache.hadoop.fs.FileSystem;
      +import org.apache.hadoop.hive.conf.HiveConf;
      +import org.apache.hadoop.hive.llap.LlapBaseRecordReader;
      +import org.apache.hadoop.hive.llap.LlapBaseRecordReader.ReaderEvent;
      +import org.apache.hadoop.hive.llap.LlapInputSplit;
      +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo;
    ++import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec;
      +import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto;
    ++import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary;
      +import org.apache.hadoop.hive.llap.ext.LlapTaskUmbilicalExternalClient;
      +import org.apache.hadoop.hive.llap.ext.LlapTaskUmbilicalExternalClient.LlapTaskUmbilicalExternalResponder;
      +import org.apache.hadoop.hive.llap.registry.ServiceInstance;
      +import org.apache.hadoop.hive.llap.registry.ServiceInstanceSet;
      +import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
      +import org.apache.hadoop.hive.llap.tez.Converters;
      +import org.apache.hadoop.io.DataInputBuffer;
      +import org.apache.hadoop.io.DataOutputBuffer;
      +import org.apache.hadoop.io.Text;
      +import org.apache.hadoop.io.WritableComparable;
      +import org.apache.hadoop.io.NullWritable;
      +import org.apache.hadoop.io.Writable;
      +import org.apache.hadoop.mapred.JobConf;
      +import org.apache.hadoop.mapred.InputFormat;
      +import org.apache.hadoop.mapred.InputSplit;
      +import org.apache.hadoop.mapred.InputSplitWithLocationInfo;
      +import org.apache.hadoop.mapred.SplitLocationInfo;
      +import org.apache.hadoop.mapred.FileSplit;
      +import org.apache.hadoop.mapred.RecordReader;
      +import org.apache.hadoop.mapred.Reporter;
      +import org.apache.hadoop.security.Credentials;
      +import org.apache.hadoop.security.UserGroupInformation;
      +import org.apache.hadoop.security.token.Token;
      +import org.apache.hadoop.security.token.TokenIdentifier;
      +import org.apache.hadoop.util.Progressable;
      +import org.apache.hadoop.yarn.api.ApplicationConstants;
      +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
      +import org.apache.hadoop.yarn.api.records.ApplicationId;
      +import org.apache.hadoop.yarn.api.records.ContainerId;
      +
      +import org.apache.tez.common.security.JobTokenIdentifier;
      +import org.apache.tez.common.security.TokenCache;
      +import org.apache.tez.dag.records.TezTaskAttemptID;
      +import org.apache.tez.runtime.api.events.TaskAttemptFailedEvent;
      +import org.apache.tez.runtime.api.impl.EventType;
      +import org.apache.tez.runtime.api.impl.TaskSpec;
      +import org.apache.tez.runtime.api.impl.TezEvent;
      +import org.apache.tez.runtime.api.impl.TezHeartbeatRequest;
      +import org.apache.tez.runtime.api.impl.TezHeartbeatResponse;
      +
      +import org.slf4j.Logger;
      +import org.slf4j.LoggerFactory;
      +
      +import com.google.common.base.Preconditions;
      +import com.google.common.collect.Lists;
      +import com.google.protobuf.ByteString;
      +
      +
      +public class LlapBaseInputFormat<V extends WritableComparable> implements InputFormat<NullWritable, V> {
      +
      + private static final Logger LOG = LoggerFactory.getLogger(LlapBaseInputFormat.class);
      +
      + private static String driverName = "org.apache.hive.jdbc.HiveDriver";
      + private String url; // "jdbc:hive2://localhost:10000/default"
      + private String user; // "hive",
      + private String pwd; // ""
      + private String query;
      +
      + public static final String URL_KEY = "llap.if.hs2.connection";
      + public static final String QUERY_KEY = "llap.if.query";
      + public static final String USER_KEY = "llap.if.user";
      + public static final String PWD_KEY = "llap.if.pwd";
      +
      + public final String SPLIT_QUERY = "select get_splits(\"%s\",%d)";
      +
      + private Connection con;
      + private Statement stmt;
      +
      + public LlapBaseInputFormat(String url, String user, String pwd, String query) {
      + this.url = url;
      + this.user = user;
      + this.pwd = pwd;
      + this.query = query;
      + }
      +
      + public LlapBaseInputFormat() {}
      +
      +
      + @Override
      + public RecordReader<NullWritable, V> getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
      +
      + LlapInputSplit llapSplit = (LlapInputSplit) split;
      +
      + // Set conf to use LLAP user rather than current user for LLAP Zk registry.
      + HiveConf.setVar(job, HiveConf.ConfVars.LLAP_ZK_REGISTRY_USER, llapSplit.getLlapUser());
      + SubmitWorkInfo submitWorkInfo = SubmitWorkInfo.fromBytes(llapSplit.getPlanBytes());
      +
      + ServiceInstance serviceInstance = getServiceInstance(job, llapSplit);
      + String host = serviceInstance.getHost();
      + int llapSubmitPort = serviceInstance.getRpcPort();
      +
      + LOG.info("Found service instance for host " + host + " with rpc port " + llapSubmitPort
      + + " and outputformat port " + serviceInstance.getOutputFormatPort());
      +
      + LlapRecordReaderTaskUmbilicalExternalResponder umbilicalResponder =
      + new LlapRecordReaderTaskUmbilicalExternalResponder();
      + LlapTaskUmbilicalExternalClient llapClient =
      + new LlapTaskUmbilicalExternalClient(job, submitWorkInfo.getTokenIdentifier(),
      + submitWorkInfo.getToken(), umbilicalResponder);
      + llapClient.init(job);
      + llapClient.start();
      +
      + SubmitWorkRequestProto submitWorkRequestProto =
      + constructSubmitWorkRequestProto(submitWorkInfo, llapSplit.getSplitNum(),
      + llapClient.getAddress(), submitWorkInfo.getToken());
      +
      + TezEvent tezEvent = new TezEvent();
      + DataInputBuffer dib = new DataInputBuffer();
      + dib.reset(llapSplit.getFragmentBytes(), 0, llapSplit.getFragmentBytes().length);
      + tezEvent.readFields(dib);
      + List<TezEvent> tezEventList = Lists.newArrayList();
      + tezEventList.add(tezEvent);
      +
      + llapClient.submitWork(submitWorkRequestProto, host, llapSubmitPort, tezEventList);
      +
      + String id = HiveConf.getVar(job, HiveConf.ConfVars.HIVEQUERYID) + "_" + llapSplit.getSplitNum();
      +
      + HiveConf conf = new HiveConf();
      + Socket socket = new Socket(host,
      + serviceInstance.getOutputFormatPort());
      +
      + LOG.debug("Socket connected");
      +
      + socket.getOutputStream().write(id.getBytes());
      + socket.getOutputStream().write(0);
      + socket.getOutputStream().flush();
      +
      + LOG.info("Registered id: " + id);
      +
      + LlapBaseRecordReader recordReader = new LlapBaseRecordReader(socket.getInputStream(), llapSplit.getSchema(), Text.class);
      + umbilicalResponder.setRecordReader(recordReader);
      + return recordReader;
      + }
      +
      + @Override
      + public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
      + List<InputSplit> ins = new ArrayList<InputSplit>();
      +
      + if (url == null) url = job.get(URL_KEY);
      + if (query == null) query = job.get(QUERY_KEY);
      + if (user == null) user = job.get(USER_KEY);
      + if (pwd == null) pwd = job.get(PWD_KEY);
      +
      + if (url == null || query == null) {
      + throw new IllegalStateException();
      + }
      +
      + try {
      + Class.forName(driverName);
      + } catch (ClassNotFoundException e) {
      + throw new IOException(e);
      + }
      +
      + try {
      + con = DriverManager.getConnection(url,user,pwd);
      + stmt = con.createStatement();
      + String sql = String.format(SPLIT_QUERY, query, numSplits);
      + ResultSet res = stmt.executeQuery(sql);
      + while (res.next()) {
      + // deserialize split
      + DataInput in = new DataInputStream(res.getBinaryStream(1));
      + InputSplitWithLocationInfo is = new LlapInputSplit();
      + is.readFields(in);
      + ins.add(is);
      + }
      +
      + res.close();
      + stmt.close();
      + } catch (Exception e) {
      + throw new IOException(e);
      + }
      + return ins.toArray(new InputSplit[ins.size()]);
      + }
      +
      + public void close() {
      + try {
      + con.close();
      + } catch (Exception e) {
      + // ignore
      + }
      + }
      +
      + private ServiceInstance getServiceInstance(JobConf job, LlapInputSplit llapSplit) throws IOException {
      + LlapRegistryService registryService = LlapRegistryService.getClient(job);
      + String host = llapSplit.getLocations()[0];
      +
      + ServiceInstance serviceInstance = getServiceInstanceForHost(registryService, host);
      + if (serviceInstance == null) {
      + throw new IOException("No service instances found for " + host + " in registry");
      + }
      +
      + return serviceInstance;
      + }
      +
      + private ServiceInstance getServiceInstanceForHost(LlapRegistryService registryService, String host) throws IOException {
      + InetAddress address = InetAddress.getByName(host);
      + ServiceInstanceSet instanceSet = registryService.getInstances();
      + ServiceInstance serviceInstance = null;
      +
      + // The name used in the service registry may not match the host name we're using.
      + // Try hostname/canonical hostname/host address
      +
      + String name = address.getHostName();
      + LOG.info("Searching service instance by hostname " + name);
      + serviceInstance = selectServiceInstance(instanceSet.getByHost(name));
      + if (serviceInstance != null) {
      + return serviceInstance;
      + }
      +
      + name = address.getCanonicalHostName();
      + LOG.info("Searching service instance by canonical hostname " + name);
      + serviceInstance = selectServiceInstance(instanceSet.getByHost(name));
      + if (serviceInstance != null) {
      + return serviceInstance;
      + }
      +
      + name = address.getHostAddress();
      + LOG.info("Searching service instance by address " + name);
      + serviceInstance = selectServiceInstance(instanceSet.getByHost(name));
      + if (serviceInstance != null) {
      + return serviceInstance;
      + }
      +
      + return serviceInstance;
      + }
      +
      + private ServiceInstance selectServiceInstance(Set<ServiceInstance> serviceInstances) {
      + if (serviceInstances == null || serviceInstances.isEmpty()) {
      + return null;
      + }
      +
      + // Get the first live service instance
      + for (ServiceInstance serviceInstance : serviceInstances) {
      + if (serviceInstance.isAlive()) {
      + return serviceInstance;
      + }
      + }
      +
      + LOG.info("No live service instances were found");
      + return null;
      + }
      +
      + private SubmitWorkRequestProto constructSubmitWorkRequestProto(SubmitWorkInfo submitWorkInfo,
      + int taskNum,
      + InetSocketAddress address,
      + Token<JobTokenIdentifier> token) throws
      + IOException {
      + TaskSpec taskSpec = submitWorkInfo.getTaskSpec();
      + ApplicationId appId = submitWorkInfo.getFakeAppId();
      +
    - SubmitWorkRequestProto.Builder builder = SubmitWorkRequestProto.newBuilder();
    ++ int attemptId = taskSpec.getTaskAttemptID().getId();
      + // This works, assuming the executor is running within YARN.
    - LOG.info("Setting user in submitWorkRequest to: " +
    - System.getenv(ApplicationConstants.Environment.USER.name()));
    - builder.setUser(System.getenv(ApplicationConstants.Environment.USER.name()));
    - builder.setApplicationIdString(appId.toString());
    - builder.setAppAttemptNumber(0);
    - builder.setTokenIdentifier(appId.toString());
    ++ String user = System.getenv(ApplicationConstants.Environment.USER.name());
    ++ LOG.info("Setting user in submitWorkRequest to: " + user);
    ++ SignableVertexSpec svs = Converters.convertTaskSpecToProto(
    ++ taskSpec, attemptId, appId.toString(), null, user); // TODO signatureKeyId
      +
      + ContainerId containerId =
      + ContainerId.newInstance(ApplicationAttemptId.newInstance(appId, 0), taskNum);
    - builder.setContainerIdString(containerId.toString());
      +
    - builder.setAmHost(address.getHostName());
    - builder.setAmPort(address.getPort());
    ++
      + Credentials taskCredentials = new Credentials();
      + // Credentials can change across DAGs. Ideally construct only once per DAG.
      + // TODO Figure out where credentials will come from. Normally Hive sets up
      + // URLs on the tez dag, for which Tez acquires credentials.
      +
      + // taskCredentials.addAll(getContext().getCredentials());
      +
      + // Preconditions.checkState(currentQueryIdentifierProto.getDagIdentifier() ==
      + // taskSpec.getTaskAttemptID().getTaskID().getVertexID().getDAGId().getId());
      + // ByteBuffer credentialsBinary = credentialMap.get(currentQueryIdentifierProto);
      + // if (credentialsBinary == null) {
      + // credentialsBinary = serializeCredentials(getContext().getCredentials());
      + // credentialMap.putIfAbsent(currentQueryIdentifierProto, credentialsBinary.duplicate());
      + // } else {
      + // credentialsBinary = credentialsBinary.duplicate();
      + // }
      + // builder.setCredentialsBinary(ByteString.copyFrom(credentialsBinary));
      + Credentials credentials = new Credentials();
      + TokenCache.setSessionToken(token, credentials);
      + ByteBuffer credentialsBinary = serializeCredentials(credentials);
    - builder.setCredentialsBinary(ByteString.copyFrom(credentialsBinary));
    -
    -
    - builder.setFragmentSpec(Converters.convertTaskSpecToProto(taskSpec));
      +
      + FragmentRuntimeInfo.Builder runtimeInfo = FragmentRuntimeInfo.newBuilder();
      + runtimeInfo.setCurrentAttemptStartTime(System.currentTimeMillis());
      + runtimeInfo.setWithinDagPriority(0);
      + runtimeInfo.setDagStartTime(submitWorkInfo.getCreationTime());
      + runtimeInfo.setFirstAttemptStartTime(submitWorkInfo.getCreationTime());
      + runtimeInfo.setNumSelfAndUpstreamTasks(taskSpec.getVertexParallelism());
      + runtimeInfo.setNumSelfAndUpstreamCompletedTasks(0);
      +
    ++ SubmitWorkRequestProto.Builder builder = SubmitWorkRequestProto.newBuilder();
      +
    ++ builder.setWorkSpec(VertexOrBinary.newBuilder().setVertex(svs).build());
    ++ // TODO work spec signature
    ++ builder.setFragmentNumber(taskSpec.getTaskAttemptID().getTaskID().getId());
    ++ builder.setAttemptNumber(0);
    ++ builder.setContainerIdString(containerId.toString());
    ++ builder.setAmHost(address.getHostName());
    ++ builder.setAmPort(address.getPort());
    ++ builder.setCredentialsBinary(ByteString.copyFrom(credentialsBinary));
      + builder.setFragmentRuntimeInfo(runtimeInfo.build());
    ++
      + return builder.build();
      + }
      +
      + private ByteBuffer serializeCredentials(Credentials credentials) throws IOException {
      + Credentials containerCredentials = new Credentials();
      + containerCredentials.addAll(credentials);
      + DataOutputBuffer containerTokens_dob = new DataOutputBuffer();
      + containerCredentials.writeTokenStorageToStream(containerTokens_dob);
      + return ByteBuffer.wrap(containerTokens_dob.getData(), 0, containerTokens_dob.getLength());
      + }
      +
      + private static class LlapRecordReaderTaskUmbilicalExternalResponder implements LlapTaskUmbilicalExternalResponder {
      + protected LlapBaseRecordReader recordReader = null;
      + protected LinkedBlockingQueue<ReaderEvent> queuedEvents = new LinkedBlockingQueue<ReaderEvent>();
      +
      + public LlapRecordReaderTaskUmbilicalExternalResponder() {
      + }
      +
      + @Override
      + public void submissionFailed(String fragmentId, Throwable throwable) {
      + try {
      + sendOrQueueEvent(ReaderEvent.errorEvent(
      + "Received submission failed event for fragment ID " + fragmentId));
      + } catch (Exception err) {
      + LOG.error("Error during heartbeat responder:", err);
      + }
      + }
      +
      + @Override
      + public void heartbeat(TezHeartbeatRequest request) {
      + TezTaskAttemptID taskAttemptId = request.getCurrentTaskAttemptID();
      + List<TezEvent> inEvents = request.getEvents();
      + for (TezEvent tezEvent : ListUtils.emptyIfNull(inEvents)) {
      + EventType eventType = tezEvent.getEventType();
      + try {
      + switch (eventType) {
      + case TASK_ATTEMPT_COMPLETED_EVENT:
      + sendOrQueueEvent(ReaderEvent.doneEvent());
      + break;
      + case TASK_ATTEMPT_FAILED_EVENT:
      + TaskAttemptFailedEvent taskFailedEvent = (TaskAttemptFailedEvent) tezEvent.getEvent();
      + sendOrQueueEvent(ReaderEvent.errorEvent(taskFailedEvent.getDiagnostics()));
      + break;
      + case TASK_STATUS_UPDATE_EVENT:
      + // If we want to handle counters
      + break;
      + default:
      + LOG.warn("Unhandled event type " + eventType);
      + break;
      + }
      + } catch (Exception err) {
      + LOG.error("Error during heartbeat responder:", err);
      + }
      + }
      + }
      +
      + @Override
      + public void taskKilled(TezTaskAttemptID taskAttemptId) {
      + try {
      + sendOrQueueEvent(ReaderEvent.errorEvent(
      + "Received task killed event for task ID " + taskAttemptId));
      + } catch (Exception err) {
      + LOG.error("Error during heartbeat responder:", err);
      + }
      + }
      +
      + @Override
      + public void heartbeatTimeout(String taskAttemptId) {
      + try {
      + sendOrQueueEvent(ReaderEvent.errorEvent(
      + "Timed out waiting for heartbeat for task ID " + taskAttemptId));
      + } catch (Exception err) {
      + LOG.error("Error during heartbeat responder:", err);
      + }
      + }
      +
      + public synchronized LlapBaseRecordReader getRecordReader() {
      + return recordReader;
      + }
      +
      + public synchronized void setRecordReader(LlapBaseRecordReader recordReader) {
      + this.recordReader = recordReader;
      +
      + if (recordReader == null) {
      + return;
      + }
      +
      + // If any events were queued by the responder, give them to the record reader now.
      + while (!queuedEvents.isEmpty()) {
      + ReaderEvent readerEvent = queuedEvents.poll();
      + LOG.debug("Sending queued event to record reader: " + readerEvent.getEventType());
      + recordReader.handleEvent(readerEvent);
      + }
      + }
      +
      + /**
      + * Send the ReaderEvents to the record reader, if it is registered to this responder.
      + * If there is no registered record reader, add them to a list of pending reader events
      + * since we don't want to drop these events.
      + * @param readerEvent
      + */
      + protected synchronized void sendOrQueueEvent(ReaderEvent readerEvent) {
      + LlapBaseRecordReader recordReader = getRecordReader();
      + if (recordReader != null) {
      + recordReader.handleEvent(readerEvent);
      + } else {
      + if (LOG.isDebugEnabled()) {
      + LOG.debug("No registered record reader, queueing event " + readerEvent.getEventType()
      + + " with message " + readerEvent.getMessage());
      + }
      +
      + try {
      + queuedEvents.put(readerEvent);
      + } catch (Exception err) {
      + throw new RuntimeException("Unexpected exception while queueing reader event", err);
      + }
      + }
      + }
      +
      + /**
      + * Clear the list of queued reader events if we are not interested in sending any pending events to any registering record reader.
      + */
      + public void clearQueuedEvents() {
      + queuedEvents.clear();
      + }
      + }
      +}
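
    A minimal configuration sketch for this input format, assuming an HS2 endpoint at localhost:10000 and a placeholder query; only the four property keys defined above come from this file, everything else is illustrative.

      import org.apache.hadoop.hive.llap.LlapBaseInputFormat;
      import org.apache.hadoop.io.NullWritable;
      import org.apache.hadoop.io.Text;
      import org.apache.hadoop.mapred.InputSplit;
      import org.apache.hadoop.mapred.JobConf;
      import org.apache.hadoop.mapred.RecordReader;
      import org.apache.hadoop.mapred.Reporter;

      public class LlapInputFormatSketch {
        public void readAll() throws Exception {
          JobConf job = new JobConf();
          job.set(LlapBaseInputFormat.URL_KEY, "jdbc:hive2://localhost:10000/default"); // HS2 endpoint
          job.set(LlapBaseInputFormat.USER_KEY, "hive");
          job.set(LlapBaseInputFormat.PWD_KEY, "");
          job.set(LlapBaseInputFormat.QUERY_KEY, "select * from src"); // placeholder query

          LlapBaseInputFormat<Text> inputFormat = new LlapBaseInputFormat<Text>();
          InputSplit[] splits = inputFormat.getSplits(job, 4); // runs get_splits() through HS2
          for (InputSplit split : splits) {
            RecordReader<NullWritable, Text> reader =
                inputFormat.getRecordReader(split, job, Reporter.NULL);
            NullWritable key = reader.createKey();
            Text value = reader.createValue();
            while (reader.next(key, value)) {
              // each row is streamed back from the LLAP daemon's output format port
            }
            reader.close();
          }
        }
      }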

    http://git-wip-us.apache.org/repos/asf/hive/blob/e0579097/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    ----------------------------------------------------------------------
    diff --cc llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    index d8367ce,2bfe3ed..2524dc2
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    @@@ -263,13 -267,12 +267,12 @@@ public class ContainerRunnerImpl extend
               new QueryIdentifier(request.getQueryIdentifier().getAppIdentifier(),
                   request.getQueryIdentifier().getDagIdentifier());
           LOG.info("Processing queryComplete notification for {}", queryIdentifier);
    - List<QueryFragmentInfo> knownFragments =
    - queryTracker
    - .queryComplete(queryIdentifier, request.getDeleteDelay());
    - LOG.info("Pending fragment count for completed query {} = {}", queryIdentifier,
    + List<QueryFragmentInfo> knownFragments = queryTracker.queryComplete(
    + queryIdentifier, request.getDeleteDelay(), false);
    + LOG.info("DBG: Pending fragment count for completed query {} = {}", queryIdentifier,
               knownFragments.size());
           for (QueryFragmentInfo fragmentInfo : knownFragments) {
      - LOG.info("DBG: Issuing killFragment for completed query {} {}", queryIdentifier,
      + LOG.info("Issuing killFragment for completed query {} {}", queryIdentifier,
                 fragmentInfo.getFragmentIdentifierString());
             executorService.killFragment(fragmentInfo.getFragmentIdentifierString());
           }

    http://git-wip-us.apache.org/repos/asf/hive/blob/e0579097/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
    ----------------------------------------------------------------------

    http://git-wip-us.apache.org/repos/asf/hive/blob/e0579097/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
    ----------------------------------------------------------------------
    diff --cc llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
    index 4a33373,3093de7..8594ee1
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
    @@@ -134,15 -135,20 +135,18 @@@ public class TaskRunnerCallable extend
           this.memoryAvailable = memoryAvailable;
           this.confParams = confParams;
           this.jobToken = TokenCache.getSessionToken(credentials);
    - this.taskSpec = Converters.getTaskSpecfromProto(request.getFragmentSpec());
    + // TODO: support binary spec here or above
    + this.vertex = request.getWorkSpec().getVertex();
    + this.taskSpec = Converters.getTaskSpecfromProto(
    + vertex, request.getFragmentNumber(), request.getAttemptNumber(), attemptId);
           this.amReporter = amReporter;
           // Register with the AMReporter when the callable is setup. Unregister once it starts running.
      - if (jobToken != null) {
           this.amReporter.registerTask(request.getAmHost(), request.getAmPort(),
    - request.getUser(), jobToken, fragmentInfo.getQueryInfo().getQueryIdentifier());
    + vertex.getUser(), jobToken, fragmentInfo.getQueryInfo().getQueryIdentifier());
      - }
           this.metrics = metrics;
    - this.requestId = request.getFragmentSpec().getFragmentIdentifierString();
    + this.requestId = taskSpec.getTaskAttemptID().toString();
           // TODO Change this to the queryId/Name when that's available.
    - this.queryId = request.getFragmentSpec().getDagName();
    + this.queryId = vertex.getDagName();
           this.killedTaskHandler = killedTaskHandler;
           this.fragmentCompletionHanler = fragmentCompleteHandler;
           this.tezHadoopShim = tezHadoopShim;

    http://git-wip-us.apache.org/repos/asf/hive/blob/e0579097/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
    ----------------------------------------------------------------------
  • Spena at May 6, 2016 at 8:42 pm
    HIVE-13669 : LLAP: io.enabled config is ignored on the server side (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/652f88ad
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/652f88ad
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/652f88ad

    Branch: refs/heads/java8
    Commit: 652f88ad973ebe1668b5663617259795cc007953
    Parents: 212077b
    Author: Sergey Shelukhin <sershe@apache.org>
    Authored: Wed May 4 14:55:01 2016 -0700
    Committer: Sergey Shelukhin <sershe@apache.org>
    Committed: Wed May 4 14:55:01 2016 -0700

    ----------------------------------------------------------------------
      .../org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java | 5 +++--
      1 file changed, 3 insertions(+), 2 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/652f88ad/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
    index d23a44a..e662de9 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
    @@ -322,8 +322,9 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
            fnLocalizer.init();
            fnLocalizer.startLocalizeAllFunctions();
          }
    - LlapProxy.initializeLlapIo(conf);
    -
    + if (isIoEnabled()) {
    + LlapProxy.initializeLlapIo(conf);
    + }
        }

        @Override
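
    The fix gates IO initialization on the daemon side instead of calling LlapProxy.initializeLlapIo(conf) unconditionally. A small sketch of toggling the flag, assuming it is HiveConf's hive.llap.io.enabled (LLAP_IO_ENABLED); treat the key and default value as assumptions, not part of this commit.

      import org.apache.hadoop.hive.conf.HiveConf;

      public class LlapIoFlagSketch {
        public static void main(String[] args) {
          HiveConf conf = new HiveConf();
          // Assumption: this is the setting isIoEnabled() consults on the daemon.
          conf.setBoolean("hive.llap.io.enabled", false);
          // With this change the daemon skips LlapProxy.initializeLlapIo(conf) when the flag is off.
          System.out.println("llap io enabled: " + conf.getBoolean("hive.llap.io.enabled", true));
        }
      }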
  • Spena at May 6, 2016 at 8:42 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    index 4f0c8fd..0e7b745 100644
    --- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    +++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    @@ -167,6 +167,12 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
         */
        public function create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys);
        /**
    + * @param \metastore\DropConstraintRequest $req
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
    + public function drop_constraint(\metastore\DropConstraintRequest $req);
    + /**
         * @param string $dbname
         * @param string $name
         * @param bool $deleteData
    @@ -2250,6 +2256,60 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
          return;
        }

    + public function drop_constraint(\metastore\DropConstraintRequest $req)
    + {
    + $this->send_drop_constraint($req);
    + $this->recv_drop_constraint();
    + }
    +
    + public function send_drop_constraint(\metastore\DropConstraintRequest $req)
    + {
    + $args = new \metastore\ThriftHiveMetastore_drop_constraint_args();
    + $args->req = $req;
    + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
    + if ($bin_accel)
    + {
    + thrift_protocol_write_binary($this->output_, 'drop_constraint', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
    + }
    + else
    + {
    + $this->output_->writeMessageBegin('drop_constraint', TMessageType::CALL, $this->seqid_);
    + $args->write($this->output_);
    + $this->output_->writeMessageEnd();
    + $this->output_->getTransport()->flush();
    + }
    + }
    +
    + public function recv_drop_constraint()
    + {
    + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
    + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_drop_constraint_result', $this->input_->isStrictRead());
    + else
    + {
    + $rseqid = 0;
    + $fname = null;
    + $mtype = 0;
    +
    + $this->input_->readMessageBegin($fname, $mtype, $rseqid);
    + if ($mtype == TMessageType::EXCEPTION) {
    + $x = new TApplicationException();
    + $x->read($this->input_);
    + $this->input_->readMessageEnd();
    + throw $x;
    + }
    + $result = new \metastore\ThriftHiveMetastore_drop_constraint_result();
    + $result->read($this->input_);
    + $this->input_->readMessageEnd();
    + }
    + if ($result->o1 !== null) {
    + throw $result->o1;
    + }
    + if ($result->o3 !== null) {
    + throw $result->o3;
    + }
    + return;
    + }
    +
        public function drop_table($dbname, $name, $deleteData)
        {
          $this->send_drop_table($dbname, $name, $deleteData);
    @@ -13889,6 +13949,188 @@ class ThriftHiveMetastore_create_table_with_constraints_result {

      }

    +class ThriftHiveMetastore_drop_constraint_args {
    + static $_TSPEC;
    +
    + /**
    + * @var \metastore\DropConstraintRequest
    + */
    + public $req = null;
    +
    + public function __construct($vals=null) {
    + if (!isset(self::$_TSPEC)) {
    + self::$_TSPEC = array(
    + 1 => array(
    + 'var' => 'req',
    + 'type' => TType::STRUCT,
    + 'class' => '\metastore\DropConstraintRequest',
    + ),
    + );
    + }
    + if (is_array($vals)) {
    + if (isset($vals['req'])) {
    + $this->req = $vals['req'];
    + }
    + }
    + }
    +
    + public function getName() {
    + return 'ThriftHiveMetastore_drop_constraint_args';
    + }
    +
    + public function read($input)
    + {
    + $xfer = 0;
    + $fname = null;
    + $ftype = 0;
    + $fid = 0;
    + $xfer += $input->readStructBegin($fname);
    + while (true)
    + {
    + $xfer += $input->readFieldBegin($fname, $ftype, $fid);
    + if ($ftype == TType::STOP) {
    + break;
    + }
    + switch ($fid)
    + {
    + case 1:
    + if ($ftype == TType::STRUCT) {
    + $this->req = new \metastore\DropConstraintRequest();
    + $xfer += $this->req->read($input);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + default:
    + $xfer += $input->skip($ftype);
    + break;
    + }
    + $xfer += $input->readFieldEnd();
    + }
    + $xfer += $input->readStructEnd();
    + return $xfer;
    + }
    +
    + public function write($output) {
    + $xfer = 0;
    + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_constraint_args');
    + if ($this->req !== null) {
    + if (!is_object($this->req)) {
    + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
    + }
    + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1);
    + $xfer += $this->req->write($output);
    + $xfer += $output->writeFieldEnd();
    + }
    + $xfer += $output->writeFieldStop();
    + $xfer += $output->writeStructEnd();
    + return $xfer;
    + }
    +
    +}
    +
    +class ThriftHiveMetastore_drop_constraint_result {
    + static $_TSPEC;
    +
    + /**
    + * @var \metastore\NoSuchObjectException
    + */
    + public $o1 = null;
    + /**
    + * @var \metastore\MetaException
    + */
    + public $o3 = null;
    +
    + public function __construct($vals=null) {
    + if (!isset(self::$_TSPEC)) {
    + self::$_TSPEC = array(
    + 1 => array(
    + 'var' => 'o1',
    + 'type' => TType::STRUCT,
    + 'class' => '\metastore\NoSuchObjectException',
    + ),
    + 2 => array(
    + 'var' => 'o3',
    + 'type' => TType::STRUCT,
    + 'class' => '\metastore\MetaException',
    + ),
    + );
    + }
    + if (is_array($vals)) {
    + if (isset($vals['o1'])) {
    + $this->o1 = $vals['o1'];
    + }
    + if (isset($vals['o3'])) {
    + $this->o3 = $vals['o3'];
    + }
    + }
    + }
    +
    + public function getName() {
    + return 'ThriftHiveMetastore_drop_constraint_result';
    + }
    +
    + public function read($input)
    + {
    + $xfer = 0;
    + $fname = null;
    + $ftype = 0;
    + $fid = 0;
    + $xfer += $input->readStructBegin($fname);
    + while (true)
    + {
    + $xfer += $input->readFieldBegin($fname, $ftype, $fid);
    + if ($ftype == TType::STOP) {
    + break;
    + }
    + switch ($fid)
    + {
    + case 1:
    + if ($ftype == TType::STRUCT) {
    + $this->o1 = new \metastore\NoSuchObjectException();
    + $xfer += $this->o1->read($input);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + case 2:
    + if ($ftype == TType::STRUCT) {
    + $this->o3 = new \metastore\MetaException();
    + $xfer += $this->o3->read($input);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + default:
    + $xfer += $input->skip($ftype);
    + break;
    + }
    + $xfer += $input->readFieldEnd();
    + }
    + $xfer += $input->readStructEnd();
    + return $xfer;
    + }
    +
    + public function write($output) {
    + $xfer = 0;
    + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_constraint_result');
    + if ($this->o1 !== null) {
    + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
    + $xfer += $this->o1->write($output);
    + $xfer += $output->writeFieldEnd();
    + }
    + if ($this->o3 !== null) {
    + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 2);
    + $xfer += $this->o3->write($output);
    + $xfer += $output->writeFieldEnd();
    + }
    + $xfer += $output->writeFieldStop();
    + $xfer += $output->writeStructEnd();
    + return $xfer;
    + }
    +
    +}
    +
      class ThriftHiveMetastore_drop_table_args {
        static $_TSPEC;
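
    The new drop_constraint call is exposed identically from every generated binding; below is a hedged sketch against the Java Thrift client rather than the PHP one shown here. The metastore host/port and the database, table and constraint names are placeholders.

      import org.apache.hadoop.hive.metastore.api.DropConstraintRequest;
      import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
      import org.apache.thrift.protocol.TBinaryProtocol;
      import org.apache.thrift.transport.TSocket;
      import org.apache.thrift.transport.TTransport;

      public class DropConstraintSketch {
        public static void main(String[] args) throws Exception {
          TTransport transport = new TSocket("localhost", 9083); // metastore Thrift port (assumption)
          transport.open();
          ThriftHiveMetastore.Client client =
              new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

          DropConstraintRequest req = new DropConstraintRequest();
          req.setDbname("default");
          req.setTablename("orders");         // hypothetical table
          req.setConstraintname("pk_orders"); // hypothetical constraint
          client.drop_constraint(req);        // throws NoSuchObjectException / MetaException on failure

          transport.close();
        }
      }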


    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-php/metastore/Types.php
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php
    index e2fa963..a8a7db9 100644
    --- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
    +++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
    @@ -9153,6 +9153,127 @@ class ForeignKeysResponse {

      }

    +class DropConstraintRequest {
    + static $_TSPEC;
    +
    + /**
    + * @var string
    + */
    + public $dbname = null;
    + /**
    + * @var string
    + */
    + public $tablename = null;
    + /**
    + * @var string
    + */
    + public $constraintname = null;
    +
    + public function __construct($vals=null) {
    + if (!isset(self::$_TSPEC)) {
    + self::$_TSPEC = array(
    + 1 => array(
    + 'var' => 'dbname',
    + 'type' => TType::STRING,
    + ),
    + 2 => array(
    + 'var' => 'tablename',
    + 'type' => TType::STRING,
    + ),
    + 3 => array(
    + 'var' => 'constraintname',
    + 'type' => TType::STRING,
    + ),
    + );
    + }
    + if (is_array($vals)) {
    + if (isset($vals['dbname'])) {
    + $this->dbname = $vals['dbname'];
    + }
    + if (isset($vals['tablename'])) {
    + $this->tablename = $vals['tablename'];
    + }
    + if (isset($vals['constraintname'])) {
    + $this->constraintname = $vals['constraintname'];
    + }
    + }
    + }
    +
    + public function getName() {
    + return 'DropConstraintRequest';
    + }
    +
    + public function read($input)
    + {
    + $xfer = 0;
    + $fname = null;
    + $ftype = 0;
    + $fid = 0;
    + $xfer += $input->readStructBegin($fname);
    + while (true)
    + {
    + $xfer += $input->readFieldBegin($fname, $ftype, $fid);
    + if ($ftype == TType::STOP) {
    + break;
    + }
    + switch ($fid)
    + {
    + case 1:
    + if ($ftype == TType::STRING) {
    + $xfer += $input->readString($this->dbname);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + case 2:
    + if ($ftype == TType::STRING) {
    + $xfer += $input->readString($this->tablename);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + case 3:
    + if ($ftype == TType::STRING) {
    + $xfer += $input->readString($this->constraintname);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + default:
    + $xfer += $input->skip($ftype);
    + break;
    + }
    + $xfer += $input->readFieldEnd();
    + }
    + $xfer += $input->readStructEnd();
    + return $xfer;
    + }
    +
    + public function write($output) {
    + $xfer = 0;
    + $xfer += $output->writeStructBegin('DropConstraintRequest');
    + if ($this->dbname !== null) {
    + $xfer += $output->writeFieldBegin('dbname', TType::STRING, 1);
    + $xfer += $output->writeString($this->dbname);
    + $xfer += $output->writeFieldEnd();
    + }
    + if ($this->tablename !== null) {
    + $xfer += $output->writeFieldBegin('tablename', TType::STRING, 2);
    + $xfer += $output->writeString($this->tablename);
    + $xfer += $output->writeFieldEnd();
    + }
    + if ($this->constraintname !== null) {
    + $xfer += $output->writeFieldBegin('constraintname', TType::STRING, 3);
    + $xfer += $output->writeString($this->constraintname);
    + $xfer += $output->writeFieldEnd();
    + }
    + $xfer += $output->writeFieldStop();
    + $xfer += $output->writeStructEnd();
    + return $xfer;
    + }
    +
    +}
    +
      class PartitionsByExprResult {
        static $_TSPEC;


    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    index 3ec46f1..5323d9f 100755
    --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    @@ -43,6 +43,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
        print(' void create_table(Table tbl)')
        print(' void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)')
        print(' void create_table_with_constraints(Table tbl, primaryKeys, foreignKeys)')
    + print(' void drop_constraint(DropConstraintRequest req)')
        print(' void drop_table(string dbname, string name, bool deleteData)')
        print(' void drop_table_with_environment_context(string dbname, string name, bool deleteData, EnvironmentContext environment_context)')
        print(' get_tables(string db_name, string pattern)')
    @@ -353,6 +354,12 @@ elif cmd == 'create_table_with_constraints':
          sys.exit(1)
        pp.pprint(client.create_table_with_constraints(eval(args[0]),eval(args[1]),eval(args[2]),))

    +elif cmd == 'drop_constraint':
    + if len(args) != 1:
    + print('drop_constraint requires 1 args')
    + sys.exit(1)
    + pp.pprint(client.drop_constraint(eval(args[0]),))
    +
      elif cmd == 'drop_table':
        if len(args) != 3:
          print('drop_table requires 3 args')

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
    index 119a5f1..bf8d383 100644
    --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
    +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
    @@ -165,6 +165,13 @@ class Iface(fb303.FacebookService.Iface):
          """
          pass

    + def drop_constraint(self, req):
    + """
    + Parameters:
    + - req
    + """
    + pass
    +
        def drop_table(self, dbname, name, deleteData):
          """
          Parameters:
    @@ -1875,6 +1882,39 @@ class Client(fb303.FacebookService.Client, Iface):
            raise result.o4
          return

    + def drop_constraint(self, req):
    + """
    + Parameters:
    + - req
    + """
    + self.send_drop_constraint(req)
    + self.recv_drop_constraint()
    +
    + def send_drop_constraint(self, req):
    + self._oprot.writeMessageBegin('drop_constraint', TMessageType.CALL, self._seqid)
    + args = drop_constraint_args()
    + args.req = req
    + args.write(self._oprot)
    + self._oprot.writeMessageEnd()
    + self._oprot.trans.flush()
    +
    + def recv_drop_constraint(self):
    + iprot = self._iprot
    + (fname, mtype, rseqid) = iprot.readMessageBegin()
    + if mtype == TMessageType.EXCEPTION:
    + x = TApplicationException()
    + x.read(iprot)
    + iprot.readMessageEnd()
    + raise x
    + result = drop_constraint_result()
    + result.read(iprot)
    + iprot.readMessageEnd()
    + if result.o1 is not None:
    + raise result.o1
    + if result.o3 is not None:
    + raise result.o3
    + return
    +
        def drop_table(self, dbname, name, deleteData):
          """
          Parameters:
    @@ -6499,6 +6539,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
          self._processMap["create_table"] = Processor.process_create_table
          self._processMap["create_table_with_environment_context"] = Processor.process_create_table_with_environment_context
          self._processMap["create_table_with_constraints"] = Processor.process_create_table_with_constraints
    + self._processMap["drop_constraint"] = Processor.process_drop_constraint
          self._processMap["drop_table"] = Processor.process_drop_table
          self._processMap["drop_table_with_environment_context"] = Processor.process_drop_table_with_environment_context
          self._processMap["get_tables"] = Processor.process_get_tables
    @@ -7141,6 +7182,31 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
          oprot.writeMessageEnd()
          oprot.trans.flush()

    + def process_drop_constraint(self, seqid, iprot, oprot):
    + args = drop_constraint_args()
    + args.read(iprot)
    + iprot.readMessageEnd()
    + result = drop_constraint_result()
    + try:
    + self._handler.drop_constraint(args.req)
    + msg_type = TMessageType.REPLY
    + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
    + raise
    + except NoSuchObjectException as o1:
    + msg_type = TMessageType.REPLY
    + result.o1 = o1
    + except MetaException as o3:
    + msg_type = TMessageType.REPLY
    + result.o3 = o3
    + except Exception as ex:
    + msg_type = TMessageType.EXCEPTION
    + logging.exception(ex)
    + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    + oprot.writeMessageBegin("drop_constraint", msg_type, seqid)
    + result.write(oprot)
    + oprot.writeMessageEnd()
    + oprot.trans.flush()
    +
        def process_drop_table(self, seqid, iprot, oprot):
          args = drop_table_args()
          args.read(iprot)
    @@ -13467,6 +13533,152 @@ class create_table_with_constraints_result:
        def __ne__(self, other):
          return not (self == other)

    +class drop_constraint_args:
    + """
    + Attributes:
    + - req
    + """
    +
    + thrift_spec = (
    + None, # 0
    + (1, TType.STRUCT, 'req', (DropConstraintRequest, DropConstraintRequest.thrift_spec), None, ), # 1
    + )
    +
    + def __init__(self, req=None,):
    + self.req = req
    +
    + def read(self, iprot):
    + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
    + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
    + return
    + iprot.readStructBegin()
    + while True:
    + (fname, ftype, fid) = iprot.readFieldBegin()
    + if ftype == TType.STOP:
    + break
    + if fid == 1:
    + if ftype == TType.STRUCT:
    + self.req = DropConstraintRequest()
    + self.req.read(iprot)
    + else:
    + iprot.skip(ftype)
    + else:
    + iprot.skip(ftype)
    + iprot.readFieldEnd()
    + iprot.readStructEnd()
    +
    + def write(self, oprot):
    + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
    + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
    + return
    + oprot.writeStructBegin('drop_constraint_args')
    + if self.req is not None:
    + oprot.writeFieldBegin('req', TType.STRUCT, 1)
    + self.req.write(oprot)
    + oprot.writeFieldEnd()
    + oprot.writeFieldStop()
    + oprot.writeStructEnd()
    +
    + def validate(self):
    + return
    +
    +
    + def __hash__(self):
    + value = 17
    + value = (value * 31) ^ hash(self.req)
    + return value
    +
    + def __repr__(self):
    + L = ['%s=%r' % (key, value)
    + for key, value in self.__dict__.iteritems()]
    + return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    +
    + def __eq__(self, other):
    + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    +
    + def __ne__(self, other):
    + return not (self == other)
    +
    +class drop_constraint_result:
    + """
    + Attributes:
    + - o1
    + - o3
    + """
    +
    + thrift_spec = (
    + None, # 0
    + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1
    + (2, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 2
    + )
    +
    + def __init__(self, o1=None, o3=None,):
    + self.o1 = o1
    + self.o3 = o3
    +
    + def read(self, iprot):
    + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
    + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
    + return
    + iprot.readStructBegin()
    + while True:
    + (fname, ftype, fid) = iprot.readFieldBegin()
    + if ftype == TType.STOP:
    + break
    + if fid == 1:
    + if ftype == TType.STRUCT:
    + self.o1 = NoSuchObjectException()
    + self.o1.read(iprot)
    + else:
    + iprot.skip(ftype)
    + elif fid == 2:
    + if ftype == TType.STRUCT:
    + self.o3 = MetaException()
    + self.o3.read(iprot)
    + else:
    + iprot.skip(ftype)
    + else:
    + iprot.skip(ftype)
    + iprot.readFieldEnd()
    + iprot.readStructEnd()
    +
    + def write(self, oprot):
    + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
    + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
    + return
    + oprot.writeStructBegin('drop_constraint_result')
    + if self.o1 is not None:
    + oprot.writeFieldBegin('o1', TType.STRUCT, 1)
    + self.o1.write(oprot)
    + oprot.writeFieldEnd()
    + if self.o3 is not None:
    + oprot.writeFieldBegin('o3', TType.STRUCT, 2)
    + self.o3.write(oprot)
    + oprot.writeFieldEnd()
    + oprot.writeFieldStop()
    + oprot.writeStructEnd()
    +
    + def validate(self):
    + return
    +
    +
    + def __hash__(self):
    + value = 17
    + value = (value * 31) ^ hash(self.o1)
    + value = (value * 31) ^ hash(self.o3)
    + return value
    +
    + def __repr__(self):
    + L = ['%s=%r' % (key, value)
    + for key, value in self.__dict__.iteritems()]
    + return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    +
    + def __eq__(self, other):
    + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    +
    + def __ne__(self, other):
    + return not (self == other)
    +
      class drop_table_args:
        """
        Attributes:

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
    index f008788..8e0cb71 100644
    --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
    +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
    @@ -6264,6 +6264,103 @@ class ForeignKeysResponse:
        def __ne__(self, other):
          return not (self == other)

    +class DropConstraintRequest:
    + """
    + Attributes:
    + - dbname
    + - tablename
    + - constraintname
    + """
    +
    + thrift_spec = (
    + None, # 0
    + (1, TType.STRING, 'dbname', None, None, ), # 1
    + (2, TType.STRING, 'tablename', None, None, ), # 2
    + (3, TType.STRING, 'constraintname', None, None, ), # 3
    + )
    +
    + def __init__(self, dbname=None, tablename=None, constraintname=None,):
    + self.dbname = dbname
    + self.tablename = tablename
    + self.constraintname = constraintname
    +
    + def read(self, iprot):
    + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
    + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
    + return
    + iprot.readStructBegin()
    + while True:
    + (fname, ftype, fid) = iprot.readFieldBegin()
    + if ftype == TType.STOP:
    + break
    + if fid == 1:
    + if ftype == TType.STRING:
    + self.dbname = iprot.readString()
    + else:
    + iprot.skip(ftype)
    + elif fid == 2:
    + if ftype == TType.STRING:
    + self.tablename = iprot.readString()
    + else:
    + iprot.skip(ftype)
    + elif fid == 3:
    + if ftype == TType.STRING:
    + self.constraintname = iprot.readString()
    + else:
    + iprot.skip(ftype)
    + else:
    + iprot.skip(ftype)
    + iprot.readFieldEnd()
    + iprot.readStructEnd()
    +
    + def write(self, oprot):
    + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
    + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
    + return
    + oprot.writeStructBegin('DropConstraintRequest')
    + if self.dbname is not None:
    + oprot.writeFieldBegin('dbname', TType.STRING, 1)
    + oprot.writeString(self.dbname)
    + oprot.writeFieldEnd()
    + if self.tablename is not None:
    + oprot.writeFieldBegin('tablename', TType.STRING, 2)
    + oprot.writeString(self.tablename)
    + oprot.writeFieldEnd()
    + if self.constraintname is not None:
    + oprot.writeFieldBegin('constraintname', TType.STRING, 3)
    + oprot.writeString(self.constraintname)
    + oprot.writeFieldEnd()
    + oprot.writeFieldStop()
    + oprot.writeStructEnd()
    +
    + def validate(self):
    + if self.dbname is None:
    + raise TProtocol.TProtocolException(message='Required field dbname is unset!')
    + if self.tablename is None:
    + raise TProtocol.TProtocolException(message='Required field tablename is unset!')
    + if self.constraintname is None:
    + raise TProtocol.TProtocolException(message='Required field constraintname is unset!')
    + return
    +
    +
    + def __hash__(self):
    + value = 17
    + value = (value * 31) ^ hash(self.dbname)
    + value = (value * 31) ^ hash(self.tablename)
    + value = (value * 31) ^ hash(self.constraintname)
    + return value
    +
    + def __repr__(self):
    + L = ['%s=%r' % (key, value)
    + for key, value in self.__dict__.iteritems()]
    + return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    +
    + def __eq__(self, other):
    + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    +
    + def __ne__(self, other):
    + return not (self == other)
    +
      class PartitionsByExprResult:
        """
        Attributes:

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
    index 4a24a19..4d3e49d 100644
    --- a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
    +++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
    @@ -1390,6 +1390,29 @@ class ForeignKeysResponse
        ::Thrift::Struct.generate_accessors self
      end

    +class DropConstraintRequest
    + include ::Thrift::Struct, ::Thrift::Struct_Union
    + DBNAME = 1
    + TABLENAME = 2
    + CONSTRAINTNAME = 3
    +
    + FIELDS = {
    + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'},
    + TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename'},
    + CONSTRAINTNAME => {:type => ::Thrift::Types::STRING, :name => 'constraintname'}
    + }
    +
    + def struct_fields; FIELDS; end
    +
    + def validate
    + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbname is unset!') unless @dbname
    + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tablename is unset!') unless @tablename
    + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field constraintname is unset!') unless @constraintname
    + end
    +
    + ::Thrift::Struct.generate_accessors self
    +end
    +
      class PartitionsByExprResult
        include ::Thrift::Struct, ::Thrift::Struct_Union
        PARTITIONS = 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
    index 99a764e..61d1832 100644
    --- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
    +++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
    @@ -336,6 +336,22 @@ module ThriftHiveMetastore
            return
          end

    + def drop_constraint(req)
    + send_drop_constraint(req)
    + recv_drop_constraint()
    + end
    +
    + def send_drop_constraint(req)
    + send_message('drop_constraint', Drop_constraint_args, :req => req)
    + end
    +
    + def recv_drop_constraint()
    + result = receive_message(Drop_constraint_result)
    + raise result.o1 unless result.o1.nil?
    + raise result.o3 unless result.o3.nil?
    + return
    + end
    +
          def drop_table(dbname, name, deleteData)
            send_drop_table(dbname, name, deleteData)
            recv_drop_table()
    @@ -2704,6 +2720,19 @@ module ThriftHiveMetastore
            write_result(result, oprot, 'create_table_with_constraints', seqid)
          end

    + def process_drop_constraint(seqid, iprot, oprot)
    + args = read_args(iprot, Drop_constraint_args)
    + result = Drop_constraint_result.new()
    + begin
    + @handler.drop_constraint(args.req)
    + rescue ::NoSuchObjectException => o1
    + result.o1 = o1
    + rescue ::MetaException => o3
    + result.o3 = o3
    + end
    + write_result(result, oprot, 'drop_constraint', seqid)
    + end
    +
          def process_drop_table(seqid, iprot, oprot)
            args = read_args(iprot, Drop_table_args)
            result = Drop_table_result.new()
    @@ -4954,6 +4983,40 @@ module ThriftHiveMetastore
          ::Thrift::Struct.generate_accessors self
        end

    + class Drop_constraint_args
    + include ::Thrift::Struct, ::Thrift::Struct_Union
    + REQ = 1
    +
    + FIELDS = {
    + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::DropConstraintRequest}
    + }
    +
    + def struct_fields; FIELDS; end
    +
    + def validate
    + end
    +
    + ::Thrift::Struct.generate_accessors self
    + end
    +
    + class Drop_constraint_result
    + include ::Thrift::Struct, ::Thrift::Struct_Union
    + O1 = 1
    + O3 = 2
    +
    + FIELDS = {
    + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
    + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException}
    + }
    +
    + def struct_fields; FIELDS; end
    +
    + def validate
    + end
    +
    + ::Thrift::Struct.generate_accessors self
    + end
    +
        class Drop_table_args
          include ::Thrift::Struct, ::Thrift::Struct_Union
          DBNAME = 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    index 4ada9c1..9a09e7a 100644
    --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    @@ -1483,6 +1483,35 @@ public class HiveMetaStore extends ThriftHiveMetastore {
              endFunction("create_table", success, ex, tbl.getTableName());
            }
          }
    +
    + @Override
    + public void drop_constraint(DropConstraintRequest req)
    + throws MetaException, InvalidObjectException {
    + String dbName = req.getDbname();
    + String tableName = req.getTablename();
    + String constraintName = req.getConstraintname();
    + startFunction("drop_constraint", ": " + constraintName.toString());
    + boolean success = false;
    + Exception ex = null;
    + try {
    + getMS().dropConstraint(dbName, tableName, constraintName);
    + success = true;
    + } catch (NoSuchObjectException e) {
    + ex = e;
    + throw new InvalidObjectException(e.getMessage());
    + } catch (Exception e) {
    + ex = e;
    + if (e instanceof MetaException) {
    + throw (MetaException) e;
    + } else if (e instanceof InvalidObjectException) {
    + throw (InvalidObjectException) e;
    + } else {
    + throw newMetaException(e);
    + }
    + } finally {
    + endFunction("drop_constraint", success, ex, constraintName);
    + }
    + }
          private boolean is_table_exists(RawStore ms, String dbname, String name)
              throws MetaException {
            return (ms.getTable(dbname, name) != null);

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
    index 7d37d07..75fea5b 100644
    --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
    +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
    @@ -44,6 +44,7 @@ import org.apache.hadoop.hive.metastore.api.CompactionType;
      import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
      import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
      import org.apache.hadoop.hive.metastore.api.Database;
    +import org.apache.hadoop.hive.metastore.api.DropConstraintRequest;
      import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
      import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
      import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
    @@ -765,6 +766,11 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
          }
        }

    + @Override
    + public void dropConstraint(String dbName, String tableName, String constraintName) throws
    + NoSuchObjectException, MetaException, TException {
    + client.drop_constraint(new DropConstraintRequest(dbName, tableName, constraintName));
    + }

      /**
         * @param type

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
    index c900a2d..3965475 100644
    --- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
    +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
    @@ -1570,4 +1570,7 @@ public interface IMetaStoreClient {
          List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys)
          throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException;

    + void dropConstraint(String dbName, String tableName, String constraintName) throws
    + MetaException, NoSuchObjectException, TException;
    +
      }
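
    For illustration only (not part of this commit): a minimal Java sketch of how a caller might use the new IMetaStoreClient.dropConstraint method introduced above. The HiveConf, database, table, and constraint names here are assumptions chosen to mirror the new q tests.

    // Illustrative sketch, not part of the patch. Assumes a HiveConf that points at a
    // running metastore and that default.table1 has a constraint named pk1.
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

    public class DropConstraintExample {
      public static void main(String[] args) throws Exception {
        IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        try {
          // New API added by this patch: drops a named constraint from a table.
          client.dropConstraint("default", "table1", "pk1");
        } catch (NoSuchObjectException e) {
          // Raised when the constraint does not exist for the given table.
          System.err.println("Constraint not found: " + e.getMessage());
        } finally {
          client.close();
        }
      }
    }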

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
    index f651a13..5c49be9 100644
    --- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
    +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
    @@ -1025,7 +1025,8 @@ public class ObjectStore implements RawStore, Configurable {
                " table " + tableName + " record to delete");
              }

    - List<MConstraint> tabConstraints = listAllTableConstraints(dbName, tableName);
    + List<MConstraint> tabConstraints = listAllTableConstraintsWithOptionalConstraintName(
    + dbName, tableName, null);
              if (tabConstraints != null && tabConstraints.size() > 0) {
                pm.deletePersistentAll(tabConstraints);
              }
    @@ -1043,19 +1044,27 @@ public class ObjectStore implements RawStore, Configurable {
          return success;
        }

    - private List<MConstraint> listAllTableConstraints(String dbName, String tableName) {
    + private List<MConstraint> listAllTableConstraintsWithOptionalConstraintName
    + (String dbName, String tableName, String constraintname) {
          List<MConstraint> mConstraints = null;
          List<String> constraintNames = new ArrayList<String>();
          Query query = null;

          try {
            query = pm.newQuery("select constraintName from org.apache.hadoop.hive.metastore.model.MConstraint where "
    - + "(parentTable.tableName == ptblname && parentTable.database.name == pdbname) || "
    - + "(childTable != null && childTable.tableName == ctblname && childTable.database.name == cdbname)");
    + + "((parentTable.tableName == ptblname && parentTable.database.name == pdbname) || "
    + + "(childTable != null && childTable.tableName == ctblname && "
    + + "childTable.database.name == cdbname)) " + (constraintname != null ?
    + " && constraintName == constraintname" : ""));
            query.declareParameters("java.lang.String ptblname, java.lang.String pdbname,"
    - + "java.lang.String ctblname, java.lang.String cdbname");
    - Collection<?> constraintNamesColl = (Collection<?>) query.
    - executeWithArray(tableName, dbName, tableName, dbName);
    + + "java.lang.String ctblname, java.lang.String cdbname" +
    + (constraintname != null ? ", java.lang.String constraintname" : ""));
    + Collection<?> constraintNamesColl =
    + constraintname != null ?
    + ((Collection<?>) query.
    + executeWithArray(tableName, dbName, tableName, dbName, constraintname)):
    + ((Collection<?>) query.
    + executeWithArray(tableName, dbName, tableName, dbName));
            for (Iterator<?> i = constraintNamesColl.iterator(); i.hasNext();) {
              String currName = (String) i.next();
              constraintNames.add(currName);
    @@ -8389,4 +8398,27 @@ public class ObjectStore implements RawStore, Configurable {
          return foreignKeys;
        }

    + @Override
    + public void dropConstraint(String dbName, String tableName,
    + String constraintName) throws NoSuchObjectException {
    + boolean success = false;
    + try {
    + openTransaction();
    +
    + List<MConstraint> tabConstraints = listAllTableConstraintsWithOptionalConstraintName(
    + dbName, tableName, constraintName);
    + if (tabConstraints != null && tabConstraints.size() > 0) {
    + pm.deletePersistentAll(tabConstraints);
    + } else {
    + throw new NoSuchObjectException("The constraint: " + constraintName +
    + " does not exist for the associated table: " + dbName + "." + tableName);
    + }
    + success = commitTransaction();
    + } finally {
    + if (!success) {
    + rollbackTransaction();
    + }
    + }
    + }
    +
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
    index 100c396..06b8135 100644
    --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
    +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
    @@ -675,4 +675,6 @@ public interface RawStore extends Configurable {

        void createTableWithConstraints(Table tbl, List<SQLPrimaryKey> primaryKeys,
          List<SQLForeignKey> foreignKeys) throws InvalidObjectException, MetaException;
    +
    + void dropConstraint(String dbName, String tableName, String constraintName) throws NoSuchObjectException;
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
    index d4e5da4..ec5b92c 100644
    --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
    +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
    @@ -2615,4 +2615,10 @@ public class HBaseStore implements RawStore {
          throws InvalidObjectException, MetaException {
          // TODO Auto-generated method stub
        }
    +
    + @Override
    + public void dropConstraint(String dbName, String tableName,
    + String constraintName) throws NoSuchObjectException {
    + // TODO Auto-generated method stub
    + }
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
    index 86e7bea..63fcb28 100644
    --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
    +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
    @@ -844,4 +844,10 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
          throws InvalidObjectException, MetaException {
          // TODO Auto-generated method stub
        }
    +
    + @Override
    + public void dropConstraint(String dbName, String tableName,
    + String constraintName) throws NoSuchObjectException {
    + // TODO Auto-generated method stub
    + }
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
    index 5b32f00..386c70a 100644
    --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
    +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
    @@ -860,6 +860,12 @@ public class DummyRawStoreForJdoConnection implements RawStore {
          throws InvalidObjectException, MetaException {
          // TODO Auto-generated method stub
        }
    +
    + @Override
    + public void dropConstraint(String dbName, String tableName,
    + String constraintName) throws NoSuchObjectException {
    + // TODO Auto-generated method stub
    + }
      }



    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    index 9887d77..c4d3bfb 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    @@ -131,6 +131,7 @@ import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc;
      import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
      import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer;
      import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
    +import org.apache.hadoop.hive.ql.parse.SemanticException;
      import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
      import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
      import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
    @@ -356,7 +357,11 @@ public class DDLTask extends Task<DDLWork> implements Serializable {

            AlterTableDesc alterTbl = work.getAlterTblDesc();
            if (alterTbl != null) {
    - return alterTable(db, alterTbl);
    + if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT ) {
    + return dropConstraint(db, alterTbl);
    + } else {
    + return alterTable(db, alterTbl);
    + }
            }

            CreateViewDesc crtView = work.getCreateViewDesc();
    @@ -3596,7 +3601,19 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
          return 0;
        }

    - /**
    + private int dropConstraint(Hive db, AlterTableDesc alterTbl)
    + throws SemanticException, HiveException {
    + try {
    + db.dropConstraint(Utilities.getDatabaseName(alterTbl.getOldName()),
    + Utilities.getTableName(alterTbl.getOldName()),
    + alterTbl.getConstraintName());
    + } catch (NoSuchObjectException e) {
    + throw new HiveException(e);
    + }
    + return 0;
    + }
    +
    + /**
         * Drop a given table or some partitions. DropTableDesc is currently used for both.
         *
         * @param db

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
    index 515f8b2..2194a6d 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
    @@ -203,7 +203,8 @@ public class WriteEntity extends Entity implements Serializable {
            case ADDCOLS:
            case RENAME:
            case TRUNCATE:
    - case MERGEFILES: return WriteType.DDL_EXCLUSIVE;
    + case MERGEFILES:
    + case DROPCONSTRAINT: return WriteType.DDL_EXCLUSIVE;

            case ADDPARTITION:
            case ADDSERDEPROPS:

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    index 26c458c..6862f70 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    @@ -3593,4 +3593,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
          }
        }

    + public void dropConstraint(String dbName, String tableName, String constraintName)
    + throws HiveException, NoSuchObjectException {
    + try {
    + getMSC().dropConstraint(dbName, tableName, constraintName);
    + } catch (Exception e) {
    + throw new HiveException(e);
    + }
    + }
    +
      };

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    index 04e2a41..4a6617f 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    @@ -26,7 +26,6 @@ import org.slf4j.Logger;
      import org.slf4j.LoggerFactory;
      import org.apache.hadoop.fs.FileSystem;
      import org.apache.hadoop.fs.Path;
    -import org.apache.hadoop.hive.common.FileUtils;
      import org.apache.hadoop.hive.common.JavaUtils;
      import org.apache.hadoop.hive.common.StatsSetupConst;
      import org.apache.hadoop.hive.conf.HiveConf;
    @@ -88,7 +87,6 @@ import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc;
      import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
      import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
      import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
    -import org.apache.hadoop.hive.ql.plan.DDLDesc;
      import org.apache.hadoop.hive.ql.plan.DDLWork;
      import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc;
      import org.apache.hadoop.hive.ql.plan.DescFunctionDesc;
    @@ -321,6 +319,8 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
              analyzeAlterTableCompact(ast, tableName, partSpec);
            } else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS){
              analyzeAlterTableUpdateStats(ast, tableName, partSpec);
    + } else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_DROPCONSTRAINT) {
    + analyzeAlterTableDropConstraint(ast, tableName);
            }
            break;
          }
    @@ -1740,6 +1740,15 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
          rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
        }

    + private void analyzeAlterTableDropConstraint(ASTNode ast, String tableName)
    + throws SemanticException {
    + String dropConstraintName = unescapeIdentifier(ast.getChild(0).getText());
    + AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, dropConstraintName);
    +
    + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
    + alterTblDesc), conf));
    + }
    +
        static HashMap<String, String> getProps(ASTNode prop) {
          // Must be deterministic order map for consistent q-test output across Java versions
          HashMap<String, String> mapProp = new LinkedHashMap<String, String>();

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
    index 6531b03..2c66396 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
    @@ -179,6 +179,7 @@ TOK_ALTERTABLE_SKEWED_LOCATION;
      TOK_ALTERTABLE_BUCKETS;
      TOK_ALTERTABLE_CLUSTER_SORT;
      TOK_ALTERTABLE_COMPACT;
    +TOK_ALTERTABLE_DROPCONSTRAINT;
      TOK_ALTERINDEX_REBUILD;
      TOK_ALTERINDEX_PROPERTIES;
      TOK_MSCK;
    @@ -1040,6 +1041,7 @@ alterTableStatementSuffix
    | alterStatementSuffixSkewedby
    | alterStatementSuffixExchangePartition
    | alterStatementPartitionKeyType
    + | alterStatementSuffixDropConstraint
    | partitionSpec? alterTblPartitionStatementSuffix -> alterTblPartitionStatementSuffix partitionSpec?
          ;

    @@ -1129,6 +1131,13 @@ alterStatementSuffixAddCol
          -> ^(TOK_ALTERTABLE_REPLACECOLS columnNameTypeList restrictOrCascade?)
          ;

    +alterStatementSuffixDropConstraint
    +@init { pushMsg("drop constraint statement", state); }
    +@after { popMsg(state); }
    + : KW_DROP KW_CONSTRAINT cName=identifier
    + ->^(TOK_ALTERTABLE_DROPCONSTRAINT $cName)
    + ;
    +
      alterStatementSuffixRenameCol
      @init { pushMsg("rename column name", state); }
      @after { popMsg(state); }
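
    For context (illustrative only, not part of the patch): the new alterStatementSuffixDropConstraint rule above accepts the ALTER TABLE ... DROP CONSTRAINT statements exercised by the new q tests. A minimal JDBC sketch issuing that DDL follows; the connection URL is an assumption.

    // Illustrative sketch, not part of the patch. The URL is an assumption; the DDL
    // string matches the new grammar rule and is handled by DDLTask.dropConstraint.
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class DropConstraintDdlExample {
      public static void main(String[] args) throws Exception {
        Class.forName("org.apache.hive.jdbc.HiveDriver");
        try (Connection conn =
                 DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             Statement stmt = conn.createStatement()) {
          // Parsed into TOK_ALTERTABLE_DROPCONSTRAINT by DDLSemanticAnalyzer.
          stmt.execute("ALTER TABLE table1 DROP CONSTRAINT pk1");
        }
      }
    }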

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
    index fb8a33c..7b83381 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
    @@ -62,6 +62,7 @@ public final class SemanticAnalyzerFactory {
          commandType.put(HiveParser.TOK_ALTERTABLE_PROPERTIES, HiveOperation.ALTERTABLE_PROPERTIES);
          commandType.put(HiveParser.TOK_ALTERTABLE_DROPPROPERTIES, HiveOperation.ALTERTABLE_PROPERTIES);
          commandType.put(HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION, HiveOperation.ALTERTABLE_EXCHANGEPARTITION);
    + commandType.put(HiveParser.TOK_ALTERTABLE_DROPCONSTRAINT, HiveOperation.ALTERTABLE_DROPCONSTRAINT);
          commandType.put(HiveParser.TOK_SHOWDATABASES, HiveOperation.SHOWDATABASES);
          commandType.put(HiveParser.TOK_SHOWTABLES, HiveOperation.SHOWTABLES);
          commandType.put(HiveParser.TOK_SHOWCOLUMNS, HiveOperation.SHOWCOLUMNS);
    @@ -195,6 +196,7 @@ public final class SemanticAnalyzerFactory {
                case HiveParser.TOK_ALTERTABLE_DROPPROPERTIES:
                case HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION:
                case HiveParser.TOK_ALTERTABLE_SKEWED:
    + case HiveParser.TOK_ALTERTABLE_DROPCONSTRAINT:
                queryState.setCommandType(commandType.get(child.getType()));
                return new DDLSemanticAnalyzer(queryState);
              }

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
    index 4ba51ec..38d8d5a 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
    @@ -56,7 +56,7 @@ public class AlterTableDesc extends DDLDesc implements Serializable {
          DROPPARTITION("drop partition"), RENAMEPARTITION("rename partition"), ADDSKEWEDBY("add skew column"),
          ALTERSKEWEDLOCATION("alter skew location"), ALTERBUCKETNUM("alter bucket number"),
          ALTERPARTITION("alter partition"), COMPACT("compact"),
    - TRUNCATE("truncate"), MERGEFILES("merge files");
    + TRUNCATE("truncate"), MERGEFILES("merge files"), DROPCONSTRAINT("drop constraint");
          ;

          private final String name;
    @@ -116,6 +116,7 @@ public class AlterTableDesc extends DDLDesc implements Serializable {
        boolean isTurnOffSorting = false;
        boolean isCascade = false;
        EnvironmentContext environmentContext;
    + String dropConstraintName;

        public AlterTableDesc() {
        }
    @@ -263,6 +264,12 @@ public class AlterTableDesc extends DDLDesc implements Serializable {
          this.numberBuckets = numBuckets;
        }

    + public AlterTableDesc(String tableName, String dropConstraintName) {
    + this.oldName = tableName;
    + this.dropConstraintName = dropConstraintName;
    + op = AlterTableTypes.DROPCONSTRAINT;
    + }
    +
        @Explain(displayName = "new columns", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
        public List<String> getNewColsString() {
          return Utilities.getFieldSchemaString(getNewCols());
    @@ -408,6 +415,22 @@ public class AlterTableDesc extends DDLDesc implements Serializable {
        }

        /**
    + * @return the drop constraint name of the table
    + */
    + @Explain(displayName = "drop constraint name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
    + public String getConstraintName() {
    + return dropConstraintName;
    + }
    +
    + /**
    + * @param constraintName
    + * the dropConstraintName to set
    + */
    + public void setDropConstraintName(String constraintName) {
    + this.dropConstraintName = constraintName;
    + }
    +
    + /**
         * @param storageHandler
         * the storage handler to set
         */

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
    index 188cd6f..e651016 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
    @@ -115,6 +115,8 @@ public enum HiveOperation {
            new Privilege[] {Privilege.ALTER_DATA}, null),
        ALTERTABLE_PARTCOLTYPE("ALTERTABLE_PARTCOLTYPE", new Privilege[] { Privilege.SELECT }, new Privilege[] { Privilege.ALTER_DATA }),
        ALTERTABLE_EXCHANGEPARTITION("ALTERTABLE_EXCHANGEPARTITION", null, null),
    + ALTERTABLE_DROPCONSTRAINT("ALTERTABLE_DROPCONSTRAINT",
    + new Privilege[]{Privilege.ALTER_METADATA}, null),
        ALTERVIEW_RENAME("ALTERVIEW_RENAME", new Privilege[] {Privilege.ALTER_METADATA}, null),
        ALTERVIEW_AS("ALTERVIEW_AS", new Privilege[] {Privilege.ALTER_METADATA}, null),
        ALTERTABLE_COMPACT("ALTERTABLE_COMPACT", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.ALTER_DATA}),

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/queries/clientnegative/drop_invalid_constraint1.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientnegative/drop_invalid_constraint1.q b/ql/src/test/queries/clientnegative/drop_invalid_constraint1.q
    new file mode 100644
    index 0000000..2055f9e
    --- /dev/null
    +++ b/ql/src/test/queries/clientnegative/drop_invalid_constraint1.q
    @@ -0,0 +1,3 @@
    +CREATE TABLE table1 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate);
    +ALTER TABLE table1 DROP CONSTRAINT pk1;
    +ALTER TABLE table1 DROP CONSTRAINT pk1;

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/queries/clientnegative/drop_invalid_constraint2.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientnegative/drop_invalid_constraint2.q b/ql/src/test/queries/clientnegative/drop_invalid_constraint2.q
    new file mode 100644
    index 0000000..d253617
    --- /dev/null
    +++ b/ql/src/test/queries/clientnegative/drop_invalid_constraint2.q
    @@ -0,0 +1,2 @@
    +CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate);
    +ALTER TABLE table1 DROP CONSTRAINT pk1;

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/queries/clientnegative/drop_invalid_constraint3.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientnegative/drop_invalid_constraint3.q b/ql/src/test/queries/clientnegative/drop_invalid_constraint3.q
    new file mode 100644
    index 0000000..04eb1fb
    --- /dev/null
    +++ b/ql/src/test/queries/clientnegative/drop_invalid_constraint3.q
    @@ -0,0 +1,2 @@
    +CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate);
    +ALTER TABLE table2 DROP CONSTRAINT pk2;

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/queries/clientnegative/drop_invalid_constraint4.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientnegative/drop_invalid_constraint4.q b/ql/src/test/queries/clientnegative/drop_invalid_constraint4.q
    new file mode 100644
    index 0000000..3cf2d2a
    --- /dev/null
    +++ b/ql/src/test/queries/clientnegative/drop_invalid_constraint4.q
    @@ -0,0 +1,3 @@
    +CREATE TABLE table1 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate);
    +CREATE TABLE table2 (a STRING, b STRING, constraint pk2 primary key (a) disable novalidate);
    +ALTER TABLE table1 DROP CONSTRAINT pk2;
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/queries/clientpositive/create_with_constraints.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/create_with_constraints.q b/ql/src/test/queries/clientpositive/create_with_constraints.q
    index eef0c64..0bb92e4 100644
    --- a/ql/src/test/queries/clientpositive/create_with_constraints.q
    +++ b/ql/src/test/queries/clientpositive/create_with_constraints.q
    @@ -8,5 +8,17 @@ CREATE TABLE table6 (x string, y string, PRIMARY KEY (x) disable novalidate, FOR
      CONSTRAINT fk4 FOREIGN KEY (y) REFERENCES table1(a) DISABLE NOVALIDATE);
      CREATE TABLE table7 (a STRING, b STRING, primary key (a) disable novalidate rely);
      CREATE TABLE table8 (a STRING, b STRING, constraint pk8 primary key (a) disable novalidate norely);
    +CREATE TABLE table9 (a STRING, b STRING, primary key (a, b) disable novalidate rely);
    +CREATE TABLE table10 (a STRING, b STRING, constraint pk10 primary key (a) disable novalidate norely, foreign key (a, b) references table9(a, b) disable novalidate);
    +CREATE TABLE table11 (a STRING, b STRING, c STRING, constraint pk11 primary key (a) disable novalidate rely, foreign key (a, b) references table9(a, b) disable novalidate,
    +foreign key (c) references table4(x) disable novalidate);

    +ALTER TABLE table2 DROP CONSTRAINT pk1;
    +ALTER TABLE table3 DROP CONSTRAINT fk1;
    +ALTER TABLE table6 DROP CONSTRAINT fk4;

    +CREATE DATABASE dbconstraint;
    +USE dbconstraint;
    +CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate);
    +USE default;
    +ALTER TABLE dbconstraint.table2 DROP CONSTRAINT pk1;

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out
    new file mode 100644
    index 0000000..4568ccb
    --- /dev/null
    +++ b/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out
    @@ -0,0 +1,15 @@
    +PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@table1
    +POSTHOOK: query: CREATE TABLE table1 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@table1
    +PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1
    +PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
    +POSTHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1
    +POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT
    +PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1
    +PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
    +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:The constraint: pk1 does not exist for the associated table: default.table1)

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out
    new file mode 100644
    index 0000000..0051131
    --- /dev/null
    +++ b/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out
    @@ -0,0 +1,11 @@
    +PREHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@table2
    +POSTHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@table2
    +PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1
    +PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
    +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:The constraint: pk1 does not exist for the associated table: default.table1)

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out
    new file mode 100644
    index 0000000..9c60e94
    --- /dev/null
    +++ b/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out
    @@ -0,0 +1,11 @@
    +PREHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@table2
    +POSTHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@table2
    +PREHOOK: query: ALTER TABLE table2 DROP CONSTRAINT pk2
    +PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
    +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:The constraint: pk2 does not exist for the associated table: default.table2)

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out
    new file mode 100644
    index 0000000..1d93c42
    --- /dev/null
    +++ b/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out
    @@ -0,0 +1,19 @@
    +PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@table1
    +POSTHOOK: query: CREATE TABLE table1 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@table1
    +PREHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk2 primary key (a) disable novalidate)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@table2
    +POSTHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk2 primary key (a) disable novalidate)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@table2
    +PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk2
    +PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
    +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:The constraint: pk2 does not exist for the associated table: default.table1)

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/results/clientpositive/create_with_constraints.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/create_with_constraints.q.out b/ql/src/test/results/clientpositive/create_with_constraints.q.out
    index 5cf8d83..7a7a50a 100644
    --- a/ql/src/test/results/clientpositive/create_with_constraints.q.out
    +++ b/ql/src/test/results/clientpositive/create_with_constraints.q.out
    @@ -66,3 +66,71 @@ POSTHOOK: query: CREATE TABLE table8 (a STRING, b STRING, constraint pk8 primary
      POSTHOOK: type: CREATETABLE
      POSTHOOK: Output: database:default
      POSTHOOK: Output: default@table8
    +PREHOOK: query: CREATE TABLE table9 (a STRING, b STRING, primary key (a, b) disable novalidate rely)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@table9
    +POSTHOOK: query: CREATE TABLE table9 (a STRING, b STRING, primary key (a, b) disable novalidate rely)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@table9
    +PREHOOK: query: CREATE TABLE table10 (a STRING, b STRING, constraint pk10 primary key (a) disable novalidate norely, foreign key (a, b) references table9(a, b) disable novalidate)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@table10
    +POSTHOOK: query: CREATE TABLE table10 (a STRING, b STRING, constraint pk10 primary key (a) disable novalidate norely, foreign key (a, b) references table9(a, b) disable novalidate)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@table10
    +PREHOOK: query: CREATE TABLE table11 (a STRING, b STRING, c STRING, constraint pk11 primary key (a) disable novalidate rely, foreign key (a, b) references table9(a, b) disable novalidate,
    +foreign key (c) references table4(x) disable novalidate)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@table11
    +POSTHOOK: query: CREATE TABLE table11 (a STRING, b STRING, c STRING, constraint pk11 primary key (a) disable novalidate rely, foreign key (a, b) references table9(a, b) disable novalidate,
    +foreign key (c) references table4(x) disable novalidate)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@table11
    +PREHOOK: query: ALTER TABLE table2 DROP CONSTRAINT pk1
    +PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
    +POSTHOOK: query: ALTER TABLE table2 DROP CONSTRAINT pk1
    +POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT
    +PREHOOK: query: ALTER TABLE table3 DROP CONSTRAINT fk1
    +PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
    +POSTHOOK: query: ALTER TABLE table3 DROP CONSTRAINT fk1
    +POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT
    +PREHOOK: query: ALTER TABLE table6 DROP CONSTRAINT fk4
    +PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
    +POSTHOOK: query: ALTER TABLE table6 DROP CONSTRAINT fk4
    +POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT
    +PREHOOK: query: CREATE DATABASE dbconstraint
    +PREHOOK: type: CREATEDATABASE
    +PREHOOK: Output: database:dbconstraint
    +POSTHOOK: query: CREATE DATABASE dbconstraint
    +POSTHOOK: type: CREATEDATABASE
    +POSTHOOK: Output: database:dbconstraint
    +PREHOOK: query: USE dbconstraint
    +PREHOOK: type: SWITCHDATABASE
    +PREHOOK: Input: database:dbconstraint
    +POSTHOOK: query: USE dbconstraint
    +POSTHOOK: type: SWITCHDATABASE
    +POSTHOOK: Input: database:dbconstraint
    +PREHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:dbconstraint
    +PREHOOK: Output: dbconstraint@table2
    +POSTHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:dbconstraint
    +POSTHOOK: Output: dbconstraint@table2
    +PREHOOK: query: USE default
    +PREHOOK: type: SWITCHDATABASE
    +PREHOOK: Input: database:default
    +POSTHOOK: query: USE default
    +POSTHOOK: type: SWITCHDATABASE
    +POSTHOOK: Input: database:default
    +PREHOOK: query: ALTER TABLE dbconstraint.table2 DROP CONSTRAINT pk1
    +PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
    +POSTHOOK: query: ALTER TABLE dbconstraint.table2 DROP CONSTRAINT pk1
    +POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT

    http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/service/src/gen/thrift/gen-py/__init__.py
    ----------------------------------------------------------------------
    diff --git a/service/src/gen/thrift/gen-py/__init__.py b/service/src/gen/thrift/gen-py/__init__.py
    deleted file mode 100644
    index e69de29..0000000
  • Spena at May 6, 2016 at 8:42 pm
    HIVE-13592 : metastore calls map is not thread safe (Sergey Shelukhin, reviewed by Aihua Xu)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f68b5dbb
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f68b5dbb
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f68b5dbb

    Branch: refs/heads/java8
    Commit: f68b5dbb59a9e837209e64aefe5aa994476c0bdc
    Parents: e68783c
    Author: Sergey Shelukhin <sershe@apache.org>
    Authored: Wed May 4 17:05:20 2016 -0700
    Committer: Sergey Shelukhin <sershe@apache.org>
    Committed: Wed May 4 17:05:39 2016 -0700

    ----------------------------------------------------------------------
      .../hive/metastore/RetryingMetaStoreClient.java | 17 +++++++++--------
      .../org/apache/hadoop/hive/ql/metadata/Hive.java | 3 ++-
      2 files changed, 11 insertions(+), 9 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/f68b5dbb/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
    index f672adf..3c125e0 100644
    --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
    +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
    @@ -25,6 +25,7 @@ import java.lang.reflect.Method;
      import java.lang.reflect.Proxy;
      import java.lang.reflect.UndeclaredThrowableException;
      import java.util.Map;
    +import java.util.concurrent.ConcurrentHashMap;
      import java.util.concurrent.TimeUnit;

      import org.slf4j.Logger;
    @@ -55,14 +56,14 @@ public class RetryingMetaStoreClient implements InvocationHandler {
        private final IMetaStoreClient base;
        private final int retryLimit;
        private final long retryDelaySeconds;
    - private final Map<String, Long> metaCallTimeMap;
    + private final ConcurrentHashMap<String, Long> metaCallTimeMap;
        private final long connectionLifeTimeInMillis;
        private long lastConnectionTime;
        private boolean localMetaStore;


        protected RetryingMetaStoreClient(HiveConf hiveConf, Class<?>[] constructorArgTypes,
    - Object[] constructorArgs, Map<String, Long> metaCallTimeMap,
    + Object[] constructorArgs, ConcurrentHashMap<String, Long> metaCallTimeMap,
            Class<? extends IMetaStoreClient> msClientClass) throws MetaException {

          this.retryLimit = hiveConf.getIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES);
    @@ -94,7 +95,7 @@ public class RetryingMetaStoreClient implements InvocationHandler {
        }

        public static IMetaStoreClient getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
    - Map<String, Long> metaCallTimeMap, String mscClassName, boolean allowEmbedded)
    + ConcurrentHashMap<String, Long> metaCallTimeMap, String mscClassName, boolean allowEmbedded)
                throws MetaException {

          return getProxy(hiveConf,
    @@ -119,7 +120,7 @@ public class RetryingMetaStoreClient implements InvocationHandler {
         * Please use getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader) for external purpose.
         */
        public static IMetaStoreClient getProxy(HiveConf hiveConf, Class<?>[] constructorArgTypes,
    - Object[] constructorArgs, Map<String, Long> metaCallTimeMap,
    + Object[] constructorArgs, ConcurrentHashMap<String, Long> metaCallTimeMap,
            String mscClassName) throws MetaException {

          @SuppressWarnings("unchecked")
    @@ -202,11 +203,11 @@ public class RetryingMetaStoreClient implements InvocationHandler {

        private void addMethodTime(Method method, long timeTaken) {
          String methodStr = getMethodString(method);
    - Long curTime = metaCallTimeMap.get(methodStr);
    - if (curTime != null) {
    - timeTaken += curTime;
    + while (true) {
    + Long curTime = metaCallTimeMap.get(methodStr), newTime = timeTaken;
    + if (curTime != null && metaCallTimeMap.replace(methodStr, curTime, newTime + curTime)) break;
    + if (curTime == null && (null == metaCallTimeMap.putIfAbsent(methodStr, newTime))) break;
          }
    - metaCallTimeMap.put(methodStr, timeTaken);
        }

        /**

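    A minimal standalone sketch of the update pattern the addMethodTime hunk above switches to (the class and method names here are illustrative only, not part of the patch): a plain get-then-put on a shared HashMap can silently drop increments when two threads record call timings concurrently, so the patch retries the update with ConcurrentHashMap.putIfAbsent/replace until it is applied against an unchanged value.

      import java.util.concurrent.ConcurrentHashMap;

      // Illustrative sketch of the lock-free accumulate-by-key pattern used in the hunk above.
      public class CallTimeRecorder {
        private final ConcurrentHashMap<String, Long> callTimes = new ConcurrentHashMap<>();

        /** Adds timeTaken to the running total for methodName without losing concurrent updates. */
        public void addMethodTime(String methodName, long timeTaken) {
          while (true) {
            Long curTime = callTimes.get(methodName);
            if (curTime == null) {
              // No entry yet: succeeds only if no other thread created one in the meantime.
              if (callTimes.putIfAbsent(methodName, timeTaken) == null) {
                return;
              }
            } else {
              // Entry exists: succeeds only if the value is still the one we read.
              if (callTimes.replace(methodName, curTime, curTime + timeTaken)) {
                return;
              }
            }
            // Lost a race; re-read and retry.
          }
        }
      }

    On Java 8 the same update can also be written in a single call as callTimes.merge(methodName, timeTaken, Long::sum), which ConcurrentHashMap performs atomically.
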
    http://git-wip-us.apache.org/repos/asf/hive/blob/f68b5dbb/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    index 6862f70..f4a9772 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    @@ -48,6 +48,7 @@ import java.util.concurrent.ExecutorService;
      import java.util.concurrent.Executors;
      import java.util.concurrent.Future;
      import java.util.concurrent.atomic.AtomicInteger;
    +import java.util.concurrent.ConcurrentHashMap;

      import com.google.common.collect.ImmutableMap;

    @@ -162,7 +163,7 @@ public class Hive {
        private UserGroupInformation owner;

        // metastore calls timing information
    - private final Map<String, Long> metaCallTimeMap = new HashMap<String, Long>();
    + private final ConcurrentHashMap<String, Long> metaCallTimeMap = new ConcurrentHashMap<>();

        private static ThreadLocal<Hive> hiveDB = new ThreadLocal<Hive>() {
          @Override
  • Spena at May 6, 2016 at 8:42 pm
    HIVE-13638: CBO rule to pull up constants through Sort/Limit (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b04dc95f
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b04dc95f
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b04dc95f

    Branch: refs/heads/java8
    Commit: b04dc95f4fa7dda9d4806c45dbe52aed4b9f1a18
    Parents: 2d33d09
    Author: Jesus Camacho Rodriguez <jcamacho@apache.org>
    Authored: Sat Apr 30 11:49:47 2016 +0100
    Committer: Jesus Camacho Rodriguez <jcamacho@apache.org>
    Committed: Wed May 4 18:57:30 2016 +0100

    ----------------------------------------------------------------------
      .../rules/HiveReduceExpressionsRule.java | 125 ++++
      .../rules/HiveSortLimitPullUpConstantsRule.java | 157 +++++
      .../hadoop/hive/ql/parse/CalcitePlanner.java | 3 +
      .../test/queries/clientpositive/cbo_input26.q | 54 ++
      .../results/clientpositive/cbo_input26.q.out | 596 +++++++++++++++++++
      .../clientpositive/load_dyn_part14.q.out | 6 +-
      .../clientpositive/spark/load_dyn_part14.q.out | 6 +-
      .../clientpositive/spark/union_remove_25.q.out | 60 +-
      .../clientpositive/union_remove_25.q.out | 20 +-
      9 files changed, 985 insertions(+), 42 deletions(-)
    ----------------------------------------------------------------------


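    For readers skimming the diff below: the new HiveSortLimitPullUpConstantsRule rewrites plans in which the input of a SortLimit is already known, via pulled-up predicates, to produce constant columns. As a hypothetical illustration (table and column names invented here, not taken from the patch), in a query such as SELECT a, b FROM t WHERE a = 1 ORDER BY a, b LIMIT 10 the predicate a = 1 pins the first sort key to a constant; the rule can drop that key from the collation, sort only on b, and reintroduce the constant column in a Project placed above the SortLimit, i.e. roughly Project(1 AS a, b) over SortLimit(b) over Project(b) over the original input. As the class javadoc below notes, the rule is not applied when the SortLimit is the root of the plan, to avoid interfering with optimizations such as SimpleFetchOptimizer during AST conversion.
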
    http://git-wip-us.apache.org/repos/asf/hive/blob/b04dc95f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
    index 9006f45..2fe9b75 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
    @@ -396,6 +396,131 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
          assert constExps.size() == addCasts.size();
        }

    + /** Creates a map containing each (e, constant) pair that occurs within
    + * a predicate list.
    + *
    + * @param clazz Class of expression that is considered constant
    + * @param rexBuilder Rex builder
    + * @param predicates Predicate list
    + * @param <C> what to consider a constant: {@link RexLiteral} to use a narrow
    + * definition of constant, or {@link RexNode} to use
    + * {@link RexUtil#isConstant(RexNode)}
    + * @return Map from values to constants
    + */
    + public static <C extends RexNode> ImmutableMap<RexNode, C> predicateConstants(
    + Class<C> clazz, RexBuilder rexBuilder, RelOptPredicateList predicates) {
    + // We cannot use an ImmutableMap.Builder here. If there are multiple entries
    + // with the same key (e.g. "WHERE deptno = 1 AND deptno = 2"), it doesn't
    + // matter which we take, so the latter will replace the former.
    + // The basic idea is to find all the pairs of RexNode = RexLiteral
    + // (1) If 'predicates' contain a non-EQUALS, we bail out.
    + // (2) It is OK if a RexNode is equal to the same RexLiteral several times,
    + // (e.g. "WHERE deptno = 1 AND deptno = 1")
    + // (3) It will return false if there are inconsistent constraints (e.g.
    + // "WHERE deptno = 1 AND deptno = 2")
    + final Map<RexNode, C> map = new HashMap<>();
    + final Set<RexNode> excludeSet = new HashSet<>();
    + for (RexNode predicate : predicates.pulledUpPredicates) {
    + gatherConstraints(clazz, predicate, map, excludeSet, rexBuilder);
    + }
    + final ImmutableMap.Builder<RexNode, C> builder =
    + ImmutableMap.builder();
    + for (Map.Entry<RexNode, C> entry : map.entrySet()) {
    + RexNode rexNode = entry.getKey();
    + if (!overlap(rexNode, excludeSet)) {
    + builder.put(rexNode, entry.getValue());
    + }
    + }
    + return builder.build();
    + }
    +
    + private static <C extends RexNode> void gatherConstraints(Class<C> clazz,
    + RexNode predicate, Map<RexNode, C> map, Set<RexNode> excludeSet,
    + RexBuilder rexBuilder) {
    + if (predicate.getKind() != SqlKind.EQUALS) {
    + decompose(excludeSet, predicate);
    + return;
    + }
    + final List<RexNode> operands = ((RexCall) predicate).getOperands();
    + if (operands.size() != 2) {
    + decompose(excludeSet, predicate);
    + return;
    + }
    + // if it reaches here, we have rexNode equals rexNode
    + final RexNode left = operands.get(0);
    + final RexNode right = operands.get(1);
    + // note that literals are immutable too and they can only be compared through
    + // values.
    + gatherConstraint(clazz, left, right, map, excludeSet, rexBuilder);
    + gatherConstraint(clazz, right, left, map, excludeSet, rexBuilder);
    + }
    +
    + /** Returns whether a value of {@code type2} can be assigned to a variable
    + * of {@code type1}.
    + *
    + * <p>For example:
    + * <ul>
    + * <li>{@code canAssignFrom(BIGINT, TINYINT)} returns {@code true}</li>
    + * <li>{@code canAssignFrom(TINYINT, BIGINT)} returns {@code false}</li>
    + * <li>{@code canAssignFrom(BIGINT, VARCHAR)} returns {@code false}</li>
    + * </ul>
    + */
    + private static boolean canAssignFrom(RelDataType type1, RelDataType type2) {
    + final SqlTypeName name1 = type1.getSqlTypeName();
    + final SqlTypeName name2 = type2.getSqlTypeName();
    + if (name1.getFamily() == name2.getFamily()) {
    + switch (name1.getFamily()) {
    + case NUMERIC:
    + return name1.compareTo(name2) >= 0;
    + default:
    + return true;
    + }
    + }
    + return false;
    + }
    +
    + private static <C extends RexNode> void gatherConstraint(Class<C> clazz,
    + RexNode left, RexNode right, Map<RexNode, C> map, Set<RexNode> excludeSet,
    + RexBuilder rexBuilder) {
    + if (!clazz.isInstance(right)) {
    + return;
    + }
    + if (!RexUtil.isConstant(right)) {
    + return;
    + }
    + C constant = clazz.cast(right);
    + if (excludeSet.contains(left)) {
    + return;
    + }
    + final C existedValue = map.get(left);
    + if (existedValue == null) {
    + switch (left.getKind()) {
    + case CAST:
    + // Convert "CAST(c) = literal" to "c = literal", as long as it is a
    + // widening cast.
    + final RexNode operand = ((RexCall) left).getOperands().get(0);
    + if (canAssignFrom(left.getType(), operand.getType())) {
    + final RexNode castRight =
    + rexBuilder.makeCast(operand.getType(), constant);
    + if (castRight instanceof RexLiteral) {
    + left = operand;
    + constant = clazz.cast(castRight);
    + }
    + }
    + }
    + map.put(left, constant);
    + } else {
    + if (existedValue instanceof RexLiteral
    + && constant instanceof RexLiteral
    + && !((RexLiteral) existedValue).getValue()
    + .equals(((RexLiteral) constant).getValue())) {
    + // we found conflicting values, e.g. left = 10 and left = 20
    + map.remove(left);
    + excludeSet.add(left);
    + }
    + }
    + }
    +
        protected static ImmutableMap<RexNode, RexLiteral> predicateConstants(
            RelOptPredicateList predicates) {
          // We cannot use an ImmutableMap.Builder here. If there are multiple entries

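    A compressed, self-contained sketch of what the predicateConstants/gatherConstraint pair above computes, with plain strings standing in for Calcite's RexNode and RexLiteral (so nothing below is Calcite API, and details such as the widening-cast rewrite and the handling of non-equality predicates are omitted): each decomposed "expr = constant" predicate contributes an entry, repeating the same constant is harmless, and two different constants for the same expression remove it from the result.

      import java.util.AbstractMap.SimpleEntry;
      import java.util.Arrays;
      import java.util.HashSet;
      import java.util.LinkedHashMap;
      import java.util.List;
      import java.util.Map;
      import java.util.Set;

      // Simplified stand-in for predicateConstants/gatherConstraint: strings replace
      // Calcite expressions, and each input entry is one already-decomposed "expr = constant".
      public class ConstantGatherSketch {

        /** Keeps expressions pinned to exactly one constant; drops conflicting ones. */
        static Map<String, String> predicateConstants(List<Map.Entry<String, String>> equalities) {
          Map<String, String> map = new LinkedHashMap<>();
          Set<String> excludeSet = new HashSet<>();
          for (Map.Entry<String, String> eq : equalities) {
            String expr = eq.getKey();
            String constant = eq.getValue();
            if (excludeSet.contains(expr)) {
              continue;                       // already known to be inconsistent
            }
            String existing = map.get(expr);
            if (existing == null) {
              map.put(expr, constant);        // first constraint seen for this expression
            } else if (!existing.equals(constant)) {
              map.remove(expr);               // conflicting values, e.g. deptno = 1 AND deptno = 2
              excludeSet.add(expr);
            }
            // existing.equals(constant): duplicate, consistent constraint; nothing to do
          }
          return map;
        }

        public static void main(String[] args) {
          List<Map.Entry<String, String>> predicates = Arrays.asList(
              new SimpleEntry<>("deptno", "1"),
              new SimpleEntry<>("deptno", "1"),   // repeated, consistent: kept
              new SimpleEntry<>("empno", "10"),
              new SimpleEntry<>("empno", "20"));  // conflicting: empno is dropped
          System.out.println(predicateConstants(predicates)); // prints {deptno=1}
        }
      }

    Running the sketch prints {deptno=1}: the conflicting empno constraints (10 vs. 20) are discarded and excluded, matching the "WHERE deptno = 1 AND deptno = 2" case called out in the comments of the method above.
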
    http://git-wip-us.apache.org/repos/asf/hive/blob/b04dc95f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortLimitPullUpConstantsRule.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortLimitPullUpConstantsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortLimitPullUpConstantsRule.java
    new file mode 100644
    index 0000000..d14b0ba
    --- /dev/null
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortLimitPullUpConstantsRule.java
    @@ -0,0 +1,157 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
    +
    +import java.util.ArrayList;
    +import java.util.HashMap;
    +import java.util.List;
    +import java.util.Map;
    +
    +import org.apache.calcite.plan.RelOptPredicateList;
    +import org.apache.calcite.plan.RelOptRule;
    +import org.apache.calcite.plan.RelOptRuleCall;
    +import org.apache.calcite.plan.RelOptUtil;
    +import org.apache.calcite.rel.RelCollations;
    +import org.apache.calcite.rel.RelFieldCollation;
    +import org.apache.calcite.rel.RelNode;
    +import org.apache.calcite.rel.core.Sort;
    +import org.apache.calcite.rel.metadata.RelMetadataQuery;
    +import org.apache.calcite.rel.type.RelDataTypeField;
    +import org.apache.calcite.rex.RexBuilder;
    +import org.apache.calcite.rex.RexLiteral;
    +import org.apache.calcite.rex.RexNode;
    +import org.apache.calcite.rex.RexUtil;
    +import org.apache.calcite.tools.RelBuilder;
    +import org.apache.calcite.tools.RelBuilderFactory;
    +import org.apache.calcite.util.Pair;
    +import org.apache.calcite.util.mapping.Mappings;
    +import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
    +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
    +
    +import com.google.common.collect.ImmutableList;
    +
    +/**
    + * Planner rule that pulls up constant keys through a SortLimit operator.
    + *
    + * This rule is only applied on SortLimit operators that are not the root
    + * of the plan tree. This is done because the interaction of this rule
    + * with the AST conversion may cause some optimizations to not kick in
    + * e.g. SimpleFetchOptimizer. Nevertheless, this will not have any
    + * performance impact in the resulting plans.
    + */
    +public class HiveSortLimitPullUpConstantsRule extends RelOptRule {
    +
    + protected static final Logger LOG = LoggerFactory.getLogger(HiveSortLimitPullUpConstantsRule.class);
    +
    +
    + public static final HiveSortLimitPullUpConstantsRule INSTANCE =
    + new HiveSortLimitPullUpConstantsRule(HiveSortLimit.class,
    + HiveRelFactories.HIVE_BUILDER);
    +
    + private HiveSortLimitPullUpConstantsRule(
    + Class<? extends Sort> sortClass,
    + RelBuilderFactory relBuilderFactory) {
    + super(operand(RelNode.class,
    + operand(sortClass, any())),
    + relBuilderFactory, null);
    + }
    +
    + @Override
    + public void onMatch(RelOptRuleCall call) {
    + final RelNode parent = call.rel(0);
    + final Sort sort = call.rel(1);
    +
    + final int count = sort.getInput().getRowType().getFieldCount();
    + if (count == 1) {
    + // No room for optimization since we cannot convert to an empty
    + // Project operator.
    + return;
    + }
    +
    + final RexBuilder rexBuilder = sort.getCluster().getRexBuilder();
    + final RelMetadataQuery mq = RelMetadataQuery.instance();
    + final RelOptPredicateList predicates = mq.getPulledUpPredicates(sort.getInput());
    + if (predicates == null) {
    + return;
    + }
    +
    + Map<RexNode, RexNode> constants = HiveReduceExpressionsRule.predicateConstants(
    + RexNode.class, rexBuilder, predicates);
    +
    + // None of the expressions are constant. Nothing to do.
    + if (constants.isEmpty()) {
    + return;
    + }
    +
    + if (count == constants.size()) {
    + // At least a single item in project is required.
    + final Map<RexNode, RexNode> map = new HashMap<>(constants);
    + map.remove(map.keySet().iterator().next());
    + constants = map;
    + }
    +
    + // Create expressions for Project operators before and after the Sort
    + List<RelDataTypeField> fields = sort.getInput().getRowType().getFieldList();
    + List<Pair<RexNode, String>> newChildExprs = new ArrayList<>();
    + List<RexNode> topChildExprs = new ArrayList<>();
    + List<String> topChildExprsFields = new ArrayList<>();
    + for (int i = 0; i < count ; i++) {
    + RexNode expr = rexBuilder.makeInputRef(sort.getInput(), i);
    + RelDataTypeField field = fields.get(i);
    + if (constants.containsKey(expr)) {
    + topChildExprs.add(constants.get(expr));
    + topChildExprsFields.add(field.getName());
    + } else {
    + newChildExprs.add(Pair.<RexNode,String>of(expr, field.getName()));
    + topChildExprs.add(expr);
    + topChildExprsFields.add(field.getName());
    + }
    + }
    +
    + // Update field collations
    + final Mappings.TargetMapping mapping =
    + RelOptUtil.permutation(Pair.left(newChildExprs), sort.getInput().getRowType()).inverse();
    + List<RelFieldCollation> fieldCollations = new ArrayList<>();
    + for (RelFieldCollation fc : sort.getCollation().getFieldCollations()) {
    + final int target = mapping.getTargetOpt(fc.getFieldIndex());
    + if (target < 0) {
    + // It is a constant, we can ignore it
    + continue;
    + }
    + fieldCollations.add(fc.copy(target));
    + }
    +
    + // Update top Project positions
    + topChildExprs = ImmutableList.copyOf(RexUtil.apply(mapping, topChildExprs));
    +
    + // Create new Project-Sort-Project sequence
    + final RelBuilder relBuilder = call.builder();
    + relBuilder.push(sort.getInput());
    + relBuilder.proj