Repository: hive
Updated Branches:
   refs/heads/master-fixed [created] e8076ef41


http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 59c7b94..9873810 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -387,6 +387,17 @@ class Iface(fb303.FacebookService.Iface):
      """
      pass

+ def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+ """
+ Parameters:
+ - partitionSpecs
+ - source_db
+ - source_table_name
+ - dest_db
+ - dest_table_name
+ """
+ pass
+
    def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names):
      """
      Parameters:
@@ -2728,6 +2739,53 @@ class Client(fb303.FacebookService.Client, Iface):
        raise result.o4
      raise TApplicationException(TApplicationException.MISSING_RESULT, "exchange_partition failed: unknown result")

+ def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+ """
+ Parameters:
+ - partitionSpecs
+ - source_db
+ - source_table_name
+ - dest_db
+ - dest_table_name
+ """
+ self.send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+ return self.recv_exchange_partitions()
+
+ def send_exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+ self._oprot.writeMessageBegin('exchange_partitions', TMessageType.CALL, self._seqid)
+ args = exchange_partitions_args()
+ args.partitionSpecs = partitionSpecs
+ args.source_db = source_db
+ args.source_table_name = source_table_name
+ args.dest_db = dest_db
+ args.dest_table_name = dest_table_name
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_exchange_partitions(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = exchange_partitions_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ if result.o1 is not None:
+ raise result.o1
+ if result.o2 is not None:
+ raise result.o2
+ if result.o3 is not None:
+ raise result.o3
+ if result.o4 is not None:
+ raise result.o4
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "exchange_partitions failed: unknown result")
+
    def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names):
      """
      Parameters:
@@ -5817,6 +5875,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
      self._processMap["drop_partitions_req"] = Processor.process_drop_partitions_req
      self._processMap["get_partition"] = Processor.process_get_partition
      self._processMap["exchange_partition"] = Processor.process_exchange_partition
+ self._processMap["exchange_partitions"] = Processor.process_exchange_partitions
      self._processMap["get_partition_with_auth"] = Processor.process_get_partition_with_auth
      self._processMap["get_partition_by_name"] = Processor.process_get_partition_by_name
      self._processMap["get_partitions"] = Processor.process_get_partitions
@@ -7069,6 +7128,37 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
      oprot.writeMessageEnd()
      oprot.trans.flush()

+ def process_exchange_partitions(self, seqid, iprot, oprot):
+ args = exchange_partitions_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = exchange_partitions_result()
+ try:
+ result.success = self._handler.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name)
+ msg_type = TMessageType.REPLY
+ except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+ raise
+ except MetaException as o1:
+ msg_type = TMessageType.REPLY
+ result.o1 = o1
+ except NoSuchObjectException as o2:
+ msg_type = TMessageType.REPLY
+ result.o2 = o2
+ except InvalidObjectException as o3:
+ msg_type = TMessageType.REPLY
+ result.o3 = o3
+ except InvalidInputException as o4:
+ msg_type = TMessageType.REPLY
+ result.o4 = o4
+ except Exception as ex:
+ msg_type = TMessageType.EXCEPTION
+ logging.exception(ex)
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("exchange_partitions", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
    def process_get_partition_with_auth(self, seqid, iprot, oprot):
      args = get_partition_with_auth_args()
      args.read(iprot)
@@ -17089,6 +17179,262 @@ class exchange_partition_result:
    def __ne__(self, other):
      return not (self == other)

+class exchange_partitions_args:
+ """
+ Attributes:
+ - partitionSpecs
+ - source_db
+ - source_table_name
+ - dest_db
+ - dest_table_name
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.MAP, 'partitionSpecs', (TType.STRING,None,TType.STRING,None), None, ), # 1
+ (2, TType.STRING, 'source_db', None, None, ), # 2
+ (3, TType.STRING, 'source_table_name', None, None, ), # 3
+ (4, TType.STRING, 'dest_db', None, None, ), # 4
+ (5, TType.STRING, 'dest_table_name', None, None, ), # 5
+ )
+
+ def __init__(self, partitionSpecs=None, source_db=None, source_table_name=None, dest_db=None, dest_table_name=None,):
+ self.partitionSpecs = partitionSpecs
+ self.source_db = source_db
+ self.source_table_name = source_table_name
+ self.dest_db = dest_db
+ self.dest_table_name = dest_table_name
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.MAP:
+ self.partitionSpecs = {}
+ (_ktype667, _vtype668, _size666 ) = iprot.readMapBegin()
+ for _i670 in xrange(_size666):
+ _key671 = iprot.readString()
+ _val672 = iprot.readString()
+ self.partitionSpecs[_key671] = _val672
+ iprot.readMapEnd()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.source_db = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRING:
+ self.source_table_name = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRING:
+ self.dest_db = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.STRING:
+ self.dest_table_name = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('exchange_partitions_args')
+ if self.partitionSpecs is not None:
+ oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
+ oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
+ for kiter673,viter674 in self.partitionSpecs.items():
+ oprot.writeString(kiter673)
+ oprot.writeString(viter674)
+ oprot.writeMapEnd()
+ oprot.writeFieldEnd()
+ if self.source_db is not None:
+ oprot.writeFieldBegin('source_db', TType.STRING, 2)
+ oprot.writeString(self.source_db)
+ oprot.writeFieldEnd()
+ if self.source_table_name is not None:
+ oprot.writeFieldBegin('source_table_name', TType.STRING, 3)
+ oprot.writeString(self.source_table_name)
+ oprot.writeFieldEnd()
+ if self.dest_db is not None:
+ oprot.writeFieldBegin('dest_db', TType.STRING, 4)
+ oprot.writeString(self.dest_db)
+ oprot.writeFieldEnd()
+ if self.dest_table_name is not None:
+ oprot.writeFieldBegin('dest_table_name', TType.STRING, 5)
+ oprot.writeString(self.dest_table_name)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.partitionSpecs)
+ value = (value * 31) ^ hash(self.source_db)
+ value = (value * 31) ^ hash(self.source_table_name)
+ value = (value * 31) ^ hash(self.dest_db)
+ value = (value * 31) ^ hash(self.dest_table_name)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class exchange_partitions_result:
+ """
+ Attributes:
+ - success
+ - o1
+ - o2
+ - o3
+ - o4
+ """
+
+ thrift_spec = (
+ (0, TType.LIST, 'success', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 0
+ (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2
+ (3, TType.STRUCT, 'o3', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 3
+ (4, TType.STRUCT, 'o4', (InvalidInputException, InvalidInputException.thrift_spec), None, ), # 4
+ )
+
+ def __init__(self, success=None, o1=None, o2=None, o3=None, o4=None,):
+ self.success = success
+ self.o1 = o1
+ self.o2 = o2
+ self.o3 = o3
+ self.o4 = o4
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.LIST:
+ self.success = []
+ (_etype678, _size675) = iprot.readListBegin()
+ for _i679 in xrange(_size675):
+ _elem680 = Partition()
+ _elem680.read(iprot)
+ self.success.append(_elem680)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = MetaException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.o2 = NoSuchObjectException()
+ self.o2.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.o3 = InvalidObjectException()
+ self.o3.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRUCT:
+ self.o4 = InvalidInputException()
+ self.o4.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('exchange_partitions_result')
+ if self.success is not None:
+ oprot.writeFieldBegin('success', TType.LIST, 0)
+ oprot.writeListBegin(TType.STRUCT, len(self.success))
+ for iter681 in self.success:
+ iter681.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o2 is not None:
+ oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+ self.o2.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o3 is not None:
+ oprot.writeFieldBegin('o3', TType.STRUCT, 3)
+ self.o3.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o4 is not None:
+ oprot.writeFieldBegin('o4', TType.STRUCT, 4)
+ self.o4.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.success)
+ value = (value * 31) ^ hash(self.o1)
+ value = (value * 31) ^ hash(self.o2)
+ value = (value * 31) ^ hash(self.o3)
+ value = (value * 31) ^ hash(self.o4)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
  class get_partition_with_auth_args:
    """
    Attributes:
@@ -17137,10 +17483,10 @@ class get_partition_with_auth_args:
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
- (_etype669, _size666) = iprot.readListBegin()
- for _i670 in xrange(_size666):
- _elem671 = iprot.readString()
- self.part_vals.append(_elem671)
+ (_etype685, _size682) = iprot.readListBegin()
+ for _i686 in xrange(_size682):
+ _elem687 = iprot.readString()
+ self.part_vals.append(_elem687)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -17152,10 +17498,10 @@ class get_partition_with_auth_args:
        elif fid == 5:
          if ftype == TType.LIST:
            self.group_names = []
- (_etype675, _size672) = iprot.readListBegin()
- for _i676 in xrange(_size672):
- _elem677 = iprot.readString()
- self.group_names.append(_elem677)
+ (_etype691, _size688) = iprot.readListBegin()
+ for _i692 in xrange(_size688):
+ _elem693 = iprot.readString()
+ self.group_names.append(_elem693)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -17180,8 +17526,8 @@ class get_partition_with_auth_args:
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter678 in self.part_vals:
- oprot.writeString(iter678)
+ for iter694 in self.part_vals:
+ oprot.writeString(iter694)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.user_name is not None:
@@ -17191,8 +17537,8 @@ class get_partition_with_auth_args:
      if self.group_names is not None:
        oprot.writeFieldBegin('group_names', TType.LIST, 5)
        oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter679 in self.group_names:
- oprot.writeString(iter679)
+ for iter695 in self.group_names:
+ oprot.writeString(iter695)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@ -17621,11 +17967,11 @@ class get_partitions_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype683, _size680) = iprot.readListBegin()
- for _i684 in xrange(_size680):
- _elem685 = Partition()
- _elem685.read(iprot)
- self.success.append(_elem685)
+ (_etype699, _size696) = iprot.readListBegin()
+ for _i700 in xrange(_size696):
+ _elem701 = Partition()
+ _elem701.read(iprot)
+ self.success.append(_elem701)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -17654,8 +18000,8 @@ class get_partitions_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter686 in self.success:
- iter686.write(oprot)
+ for iter702 in self.success:
+ iter702.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -17749,10 +18095,10 @@ class get_partitions_with_auth_args:
        elif fid == 5:
          if ftype == TType.LIST:
            self.group_names = []
- (_etype690, _size687) = iprot.readListBegin()
- for _i691 in xrange(_size687):
- _elem692 = iprot.readString()
- self.group_names.append(_elem692)
+ (_etype706, _size703) = iprot.readListBegin()
+ for _i707 in xrange(_size703):
+ _elem708 = iprot.readString()
+ self.group_names.append(_elem708)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -17785,8 +18131,8 @@ class get_partitions_with_auth_args:
      if self.group_names is not None:
        oprot.writeFieldBegin('group_names', TType.LIST, 5)
        oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter693 in self.group_names:
- oprot.writeString(iter693)
+ for iter709 in self.group_names:
+ oprot.writeString(iter709)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@ -17847,11 +18193,11 @@ class get_partitions_with_auth_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype697, _size694) = iprot.readListBegin()
- for _i698 in xrange(_size694):
- _elem699 = Partition()
- _elem699.read(iprot)
- self.success.append(_elem699)
+ (_etype713, _size710) = iprot.readListBegin()
+ for _i714 in xrange(_size710):
+ _elem715 = Partition()
+ _elem715.read(iprot)
+ self.success.append(_elem715)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -17880,8 +18226,8 @@ class get_partitions_with_auth_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter700 in self.success:
- iter700.write(oprot)
+ for iter716 in self.success:
+ iter716.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -18039,11 +18385,11 @@ class get_partitions_pspec_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype704, _size701) = iprot.readListBegin()
- for _i705 in xrange(_size701):
- _elem706 = PartitionSpec()
- _elem706.read(iprot)
- self.success.append(_elem706)
+ (_etype720, _size717) = iprot.readListBegin()
+ for _i721 in xrange(_size717):
+ _elem722 = PartitionSpec()
+ _elem722.read(iprot)
+ self.success.append(_elem722)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -18072,8 +18418,8 @@ class get_partitions_pspec_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter707 in self.success:
- iter707.write(oprot)
+ for iter723 in self.success:
+ iter723.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -18228,10 +18574,10 @@ class get_partition_names_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype711, _size708) = iprot.readListBegin()
- for _i712 in xrange(_size708):
- _elem713 = iprot.readString()
- self.success.append(_elem713)
+ (_etype727, _size724) = iprot.readListBegin()
+ for _i728 in xrange(_size724):
+ _elem729 = iprot.readString()
+ self.success.append(_elem729)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -18254,8 +18600,8 @@ class get_partition_names_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
- for iter714 in self.success:
- oprot.writeString(iter714)
+ for iter730 in self.success:
+ oprot.writeString(iter730)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o2 is not None:
@@ -18331,10 +18677,10 @@ class get_partitions_ps_args:
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
- (_etype718, _size715) = iprot.readListBegin()
- for _i719 in xrange(_size715):
- _elem720 = iprot.readString()
- self.part_vals.append(_elem720)
+ (_etype734, _size731) = iprot.readListBegin()
+ for _i735 in xrange(_size731):
+ _elem736 = iprot.readString()
+ self.part_vals.append(_elem736)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -18364,8 +18710,8 @@ class get_partitions_ps_args:
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter721 in self.part_vals:
- oprot.writeString(iter721)
+ for iter737 in self.part_vals:
+ oprot.writeString(iter737)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.max_parts is not None:
@@ -18429,11 +18775,11 @@ class get_partitions_ps_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype725, _size722) = iprot.readListBegin()
- for _i726 in xrange(_size722):
- _elem727 = Partition()
- _elem727.read(iprot)
- self.success.append(_elem727)
+ (_etype741, _size738) = iprot.readListBegin()
+ for _i742 in xrange(_size738):
+ _elem743 = Partition()
+ _elem743.read(iprot)
+ self.success.append(_elem743)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -18462,8 +18808,8 @@ class get_partitions_ps_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter728 in self.success:
- iter728.write(oprot)
+ for iter744 in self.success:
+ iter744.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -18550,10 +18896,10 @@ class get_partitions_ps_with_auth_args:
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
- (_etype732, _size729) = iprot.readListBegin()
- for _i733 in xrange(_size729):
- _elem734 = iprot.readString()
- self.part_vals.append(_elem734)
+ (_etype748, _size745) = iprot.readListBegin()
+ for _i749 in xrange(_size745):
+ _elem750 = iprot.readString()
+ self.part_vals.append(_elem750)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -18570,10 +18916,10 @@ class get_partitions_ps_with_auth_args:
        elif fid == 6:
          if ftype == TType.LIST:
            self.group_names = []
- (_etype738, _size735) = iprot.readListBegin()
- for _i739 in xrange(_size735):
- _elem740 = iprot.readString()
- self.group_names.append(_elem740)
+ (_etype754, _size751) = iprot.readListBegin()
+ for _i755 in xrange(_size751):
+ _elem756 = iprot.readString()
+ self.group_names.append(_elem756)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -18598,8 +18944,8 @@ class get_partitions_ps_with_auth_args:
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter741 in self.part_vals:
- oprot.writeString(iter741)
+ for iter757 in self.part_vals:
+ oprot.writeString(iter757)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.max_parts is not None:
@@ -18613,8 +18959,8 @@ class get_partitions_ps_with_auth_args:
      if self.group_names is not None:
        oprot.writeFieldBegin('group_names', TType.LIST, 6)
        oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter742 in self.group_names:
- oprot.writeString(iter742)
+ for iter758 in self.group_names:
+ oprot.writeString(iter758)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@ -18676,11 +19022,11 @@ class get_partitions_ps_with_auth_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype746, _size743) = iprot.readListBegin()
- for _i747 in xrange(_size743):
- _elem748 = Partition()
- _elem748.read(iprot)
- self.success.append(_elem748)
+ (_etype762, _size759) = iprot.readListBegin()
+ for _i763 in xrange(_size759):
+ _elem764 = Partition()
+ _elem764.read(iprot)
+ self.success.append(_elem764)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -18709,8 +19055,8 @@ class get_partitions_ps_with_auth_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter749 in self.success:
- iter749.write(oprot)
+ for iter765 in self.success:
+ iter765.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -18791,10 +19137,10 @@ class get_partition_names_ps_args:
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
- (_etype753, _size750) = iprot.readListBegin()
- for _i754 in xrange(_size750):
- _elem755 = iprot.readString()
- self.part_vals.append(_elem755)
+ (_etype769, _size766) = iprot.readListBegin()
+ for _i770 in xrange(_size766):
+ _elem771 = iprot.readString()
+ self.part_vals.append(_elem771)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -18824,8 +19170,8 @@ class get_partition_names_ps_args:
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter756 in self.part_vals:
- oprot.writeString(iter756)
+ for iter772 in self.part_vals:
+ oprot.writeString(iter772)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.max_parts is not None:
@@ -18889,10 +19235,10 @@ class get_partition_names_ps_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype760, _size757) = iprot.readListBegin()
- for _i761 in xrange(_size757):
- _elem762 = iprot.readString()
- self.success.append(_elem762)
+ (_etype776, _size773) = iprot.readListBegin()
+ for _i777 in xrange(_size773):
+ _elem778 = iprot.readString()
+ self.success.append(_elem778)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -18921,8 +19267,8 @@ class get_partition_names_ps_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
- for iter763 in self.success:
- oprot.writeString(iter763)
+ for iter779 in self.success:
+ oprot.writeString(iter779)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -19093,11 +19439,11 @@ class get_partitions_by_filter_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype767, _size764) = iprot.readListBegin()
- for _i768 in xrange(_size764):
- _elem769 = Partition()
- _elem769.read(iprot)
- self.success.append(_elem769)
+ (_etype783, _size780) = iprot.readListBegin()
+ for _i784 in xrange(_size780):
+ _elem785 = Partition()
+ _elem785.read(iprot)
+ self.success.append(_elem785)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -19126,8 +19472,8 @@ class get_partitions_by_filter_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter770 in self.success:
- iter770.write(oprot)
+ for iter786 in self.success:
+ iter786.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -19298,11 +19644,11 @@ class get_part_specs_by_filter_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype774, _size771) = iprot.readListBegin()
- for _i775 in xrange(_size771):
- _elem776 = PartitionSpec()
- _elem776.read(iprot)
- self.success.append(_elem776)
+ (_etype790, _size787) = iprot.readListBegin()
+ for _i791 in xrange(_size787):
+ _elem792 = PartitionSpec()
+ _elem792.read(iprot)
+ self.success.append(_elem792)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -19331,8 +19677,8 @@ class get_part_specs_by_filter_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter777 in self.success:
- iter777.write(oprot)
+ for iter793 in self.success:
+ iter793.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -19569,10 +19915,10 @@ class get_partitions_by_names_args:
        elif fid == 3:
          if ftype == TType.LIST:
            self.names = []
- (_etype781, _size778) = iprot.readListBegin()
- for _i782 in xrange(_size778):
- _elem783 = iprot.readString()
- self.names.append(_elem783)
+ (_etype797, _size794) = iprot.readListBegin()
+ for _i798 in xrange(_size794):
+ _elem799 = iprot.readString()
+ self.names.append(_elem799)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -19597,8 +19943,8 @@ class get_partitions_by_names_args:
      if self.names is not None:
        oprot.writeFieldBegin('names', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.names))
- for iter784 in self.names:
- oprot.writeString(iter784)
+ for iter800 in self.names:
+ oprot.writeString(iter800)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@ -19657,11 +20003,11 @@ class get_partitions_by_names_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype788, _size785) = iprot.readListBegin()
- for _i789 in xrange(_size785):
- _elem790 = Partition()
- _elem790.read(iprot)
- self.success.append(_elem790)
+ (_etype804, _size801) = iprot.readListBegin()
+ for _i805 in xrange(_size801):
+ _elem806 = Partition()
+ _elem806.read(iprot)
+ self.success.append(_elem806)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -19690,8 +20036,8 @@ class get_partitions_by_names_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter791 in self.success:
- iter791.write(oprot)
+ for iter807 in self.success:
+ iter807.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -19941,11 +20287,11 @@ class alter_partitions_args:
        elif fid == 3:
          if ftype == TType.LIST:
            self.new_parts = []
- (_etype795, _size792) = iprot.readListBegin()
- for _i796 in xrange(_size792):
- _elem797 = Partition()
- _elem797.read(iprot)
- self.new_parts.append(_elem797)
+ (_etype811, _size808) = iprot.readListBegin()
+ for _i812 in xrange(_size808):
+ _elem813 = Partition()
+ _elem813.read(iprot)
+ self.new_parts.append(_elem813)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -19970,8 +20316,8 @@ class alter_partitions_args:
      if self.new_parts is not None:
        oprot.writeFieldBegin('new_parts', TType.LIST, 3)
        oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
- for iter798 in self.new_parts:
- iter798.write(oprot)
+ for iter814 in self.new_parts:
+ iter814.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@ -20310,10 +20656,10 @@ class rename_partition_args:
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
- (_etype802, _size799) = iprot.readListBegin()
- for _i803 in xrange(_size799):
- _elem804 = iprot.readString()
- self.part_vals.append(_elem804)
+ (_etype818, _size815) = iprot.readListBegin()
+ for _i819 in xrange(_size815):
+ _elem820 = iprot.readString()
+ self.part_vals.append(_elem820)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -20344,8 +20690,8 @@ class rename_partition_args:
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter805 in self.part_vals:
- oprot.writeString(iter805)
+ for iter821 in self.part_vals:
+ oprot.writeString(iter821)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.new_part is not None:
@@ -20487,10 +20833,10 @@ class partition_name_has_valid_characters_args:
        if fid == 1:
          if ftype == TType.LIST:
            self.part_vals = []
- (_etype809, _size806) = iprot.readListBegin()
- for _i810 in xrange(_size806):
- _elem811 = iprot.readString()
- self.part_vals.append(_elem811)
+ (_etype825, _size822) = iprot.readListBegin()
+ for _i826 in xrange(_size822):
+ _elem827 = iprot.readString()
+ self.part_vals.append(_elem827)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -20512,8 +20858,8 @@ class partition_name_has_valid_characters_args:
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 1)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter812 in self.part_vals:
- oprot.writeString(iter812)
+ for iter828 in self.part_vals:
+ oprot.writeString(iter828)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.throw_exception is not None:
@@ -20871,10 +21217,10 @@ class partition_name_to_vals_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype816, _size813) = iprot.readListBegin()
- for _i817 in xrange(_size813):
- _elem818 = iprot.readString()
- self.success.append(_elem818)
+ (_etype832, _size829) = iprot.readListBegin()
+ for _i833 in xrange(_size829):
+ _elem834 = iprot.readString()
+ self.success.append(_elem834)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -20897,8 +21243,8 @@ class partition_name_to_vals_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
- for iter819 in self.success:
- oprot.writeString(iter819)
+ for iter835 in self.success:
+ oprot.writeString(iter835)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -21022,11 +21368,11 @@ class partition_name_to_spec_result:
        if fid == 0:
          if ftype == TType.MAP:
            self.success = {}
- (_ktype821, _vtype822, _size820 ) = iprot.readMapBegin()
- for _i824 in xrange(_size820):
- _key825 = iprot.readString()
- _val826 = iprot.readString()
- self.success[_key825] = _val826
+ (_ktype837, _vtype838, _size836 ) = iprot.readMapBegin()
+ for _i840 in xrange(_size836):
+ _key841 = iprot.readString()
+ _val842 = iprot.readString()
+ self.success[_key841] = _val842
            iprot.readMapEnd()
          else:
            iprot.skip(ftype)
@@ -21049,9 +21395,9 @@ class partition_name_to_spec_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.MAP, 0)
        oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
- for kiter827,viter828 in self.success.items():
- oprot.writeString(kiter827)
- oprot.writeString(viter828)
+ for kiter843,viter844 in self.success.items():
+ oprot.writeString(kiter843)
+ oprot.writeString(viter844)
        oprot.writeMapEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -21127,11 +21473,11 @@ class markPartitionForEvent_args:
        elif fid == 3:
          if ftype == TType.MAP:
            self.part_vals = {}
- (_ktype830, _vtype831, _size829 ) = iprot.readMapBegin()
- for _i833 in xrange(_size829):
- _key834 = iprot.readString()
- _val835 = iprot.readString()
- self.part_vals[_key834] = _val835
+ (_ktype846, _vtype847, _size845 ) = iprot.readMapBegin()
+ for _i849 in xrange(_size845):
+ _key850 = iprot.readString()
+ _val851 = iprot.readString()
+ self.part_vals[_key850] = _val851
            iprot.readMapEnd()
          else:
            iprot.skip(ftype)
@@ -21161,9 +21507,9 @@ class markPartitionForEvent_args:
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.MAP, 3)
        oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
- for kiter836,viter837 in self.part_vals.items():
- oprot.writeString(kiter836)
- oprot.writeString(viter837)
+ for kiter852,viter853 in self.part_vals.items():
+ oprot.writeString(kiter852)
+ oprot.writeString(viter853)
        oprot.writeMapEnd()
        oprot.writeFieldEnd()
      if self.eventType is not None:
@@ -21377,11 +21723,11 @@ class isPartitionMarkedForEvent_args:
        elif fid == 3:
          if ftype == TType.MAP:
            self.part_vals = {}
- (_ktype839, _vtype840, _size838 ) = iprot.readMapBegin()
- for _i842 in xrange(_size838):
- _key843 = iprot.readString()
- _val844 = iprot.readString()
- self.part_vals[_key843] = _val844
+ (_ktype855, _vtype856, _size854 ) = iprot.readMapBegin()
+ for _i858 in xrange(_size854):
+ _key859 = iprot.readString()
+ _val860 = iprot.readString()
+ self.part_vals[_key859] = _val860
            iprot.readMapEnd()
          else:
            iprot.skip(ftype)
@@ -21411,9 +21757,9 @@ class isPartitionMarkedForEvent_args:
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.MAP, 3)
        oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
- for kiter845,viter846 in self.part_vals.items():
- oprot.writeString(kiter845)
- oprot.writeString(viter846)
+ for kiter861,viter862 in self.part_vals.items():
+ oprot.writeString(kiter861)
+ oprot.writeString(viter862)
        oprot.writeMapEnd()
        oprot.writeFieldEnd()
      if self.eventType is not None:
@@ -22468,11 +22814,11 @@ class get_indexes_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype850, _size847) = iprot.readListBegin()
- for _i851 in xrange(_size847):
- _elem852 = Index()
- _elem852.read(iprot)
- self.success.append(_elem852)
+ (_etype866, _size863) = iprot.readListBegin()
+ for _i867 in xrange(_size863):
+ _elem868 = Index()
+ _elem868.read(iprot)
+ self.success.append(_elem868)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -22501,8 +22847,8 @@ class get_indexes_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter853 in self.success:
- iter853.write(oprot)
+ for iter869 in self.success:
+ iter869.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -22657,10 +23003,10 @@ class get_index_names_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype857, _size854) = iprot.readListBegin()
- for _i858 in xrange(_size854):
- _elem859 = iprot.readString()
- self.success.append(_elem859)
+ (_etype873, _size870) = iprot.readListBegin()
+ for _i874 in xrange(_size870):
+ _elem875 = iprot.readString()
+ self.success.append(_elem875)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -22683,8 +23029,8 @@ class get_index_names_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
- for iter860 in self.success:
- oprot.writeString(iter860)
+ for iter876 in self.success:
+ oprot.writeString(iter876)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o2 is not None:
@@ -25232,10 +25578,10 @@ class get_functions_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype864, _size861) = iprot.readListBegin()
- for _i865 in xrange(_size861):
- _elem866 = iprot.readString()
- self.success.append(_elem866)
+ (_etype880, _size877) = iprot.readListBegin()
+ for _i881 in xrange(_size877):
+ _elem882 = iprot.readString()
+ self.success.append(_elem882)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -25258,8 +25604,8 @@ class get_functions_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
- for iter867 in self.success:
- oprot.writeString(iter867)
+ for iter883 in self.success:
+ oprot.writeString(iter883)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -25947,10 +26293,10 @@ class get_role_names_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype871, _size868) = iprot.readListBegin()
- for _i872 in xrange(_size868):
- _elem873 = iprot.readString()
- self.success.append(_elem873)
+ (_etype887, _size884) = iprot.readListBegin()
+ for _i888 in xrange(_size884):
+ _elem889 = iprot.readString()
+ self.success.append(_elem889)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -25973,8 +26319,8 @@ class get_role_names_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
- for iter874 in self.success:
- oprot.writeString(iter874)
+ for iter890 in self.success:
+ oprot.writeString(iter890)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -26488,11 +26834,11 @@ class list_roles_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype878, _size875) = iprot.readListBegin()
- for _i879 in xrange(_size875):
- _elem880 = Role()
- _elem880.read(iprot)
- self.success.append(_elem880)
+ (_etype894, _size891) = iprot.readListBegin()
+ for _i895 in xrange(_size891):
+ _elem896 = Role()
+ _elem896.read(iprot)
+ self.success.append(_elem896)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -26515,8 +26861,8 @@ class list_roles_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter881 in self.success:
- iter881.write(oprot)
+ for iter897 in self.success:
+ iter897.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -27025,10 +27371,10 @@ class get_privilege_set_args:
        elif fid == 3:
          if ftype == TType.LIST:
            self.group_names = []
- (_etype885, _size882) = iprot.readListBegin()
- for _i886 in xrange(_size882):
- _elem887 = iprot.readString()
- self.group_names.append(_elem887)
+ (_etype901, _size898) = iprot.readListBegin()
+ for _i902 in xrange(_size898):
+ _elem903 = iprot.readString()
+ self.group_names.append(_elem903)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -27053,8 +27399,8 @@ class get_privilege_set_args:
      if self.group_names is not None:
        oprot.writeFieldBegin('group_names', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter888 in self.group_names:
- oprot.writeString(iter888)
+ for iter904 in self.group_names:
+ oprot.writeString(iter904)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@ -27281,11 +27627,11 @@ class list_privileges_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype892, _size889) = iprot.readListBegin()
- for _i893 in xrange(_size889):
- _elem894 = HiveObjectPrivilege()
- _elem894.read(iprot)
- self.success.append(_elem894)
+ (_etype908, _size905) = iprot.readListBegin()
+ for _i909 in xrange(_size905):
+ _elem910 = HiveObjectPrivilege()
+ _elem910.read(iprot)
+ self.success.append(_elem910)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -27308,8 +27654,8 @@ class list_privileges_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter895 in self.success:
- iter895.write(oprot)
+ for iter911 in self.success:
+ iter911.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@ -27807,10 +28153,10 @@ class set_ugi_args:
        elif fid == 2:
          if ftype == TType.LIST:
            self.group_names = []
- (_etype899, _size896) = iprot.readListBegin()
- for _i900 in xrange(_size896):
- _elem901 = iprot.readString()
- self.group_names.append(_elem901)
+ (_etype915, _size912) = iprot.readListBegin()
+ for _i916 in xrange(_size912):
+ _elem917 = iprot.readString()
+ self.group_names.append(_elem917)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -27831,8 +28177,8 @@ class set_ugi_args:
      if self.group_names is not None:
        oprot.writeFieldBegin('group_names', TType.LIST, 2)
        oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter902 in self.group_names:
- oprot.writeString(iter902)
+ for iter918 in self.group_names:
+ oprot.writeString(iter918)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@ -27887,10 +28233,10 @@ class set_ugi_result:
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
- (_etype906, _size903) = iprot.readListBegin()
- for _i907 in xrange(_size903):
- _elem908 = iprot.readString()
- self.success.append(_elem908)
+ (_etype922, _size919) = iprot.readListBegin()
+ for _i923 in xrange(_size919):
+ _elem924 = iprot.readString()
+ self.success.append(_elem924)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -27913,8 +28259,8 @@ class set_ugi_result:
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
- for iter909 in self.success:
- oprot.writeString(iter909)
+ for iter925 in self.success:
+ oprot.writeString(iter925)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index 7b93158..c613e4b 100644
--- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -766,6 +766,25 @@ module ThriftHiveMetastore
        raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'exchange_partition failed: unknown result')
      end

+ def exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+ send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+ return recv_exchange_partitions()
+ end
+
+ def send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+ send_message('exchange_partitions', Exchange_partitions_args, :partitionSpecs => partitionSpecs, :source_db => source_db, :source_table_name => source_table_name, :dest_db => dest_db, :dest_table_name => dest_table_name)
+ end
+
+ def recv_exchange_partitions()
+ result = receive_message(Exchange_partitions_result)
+ return result.success unless result.success.nil?
+ raise result.o1 unless result.o1.nil?
+ raise result.o2 unless result.o2.nil?
+ raise result.o3 unless result.o3.nil?
+ raise result.o4 unless result.o4.nil?
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'exchange_partitions failed: unknown result')
+ end
+
      def get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names)
        send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names)
        return recv_get_partition_with_auth()
@@ -2775,6 +2794,23 @@ module ThriftHiveMetastore
        write_result(result, oprot, 'exchange_partition', seqid)
      end

+ def process_exchange_partitions(seqid, iprot, oprot)
+ args = read_args(iprot, Exchange_partitions_args)
+ result = Exchange_partitions_result.new()
+ begin
+ result.success = @handler.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name)
+ rescue ::MetaException => o1
+ result.o1 = o1
+ rescue ::NoSuchObjectException => o2
+ result.o2 = o2
+ rescue ::InvalidObjectException => o3
+ result.o3 = o3
+ rescue ::InvalidInputException => o4
+ result.o4 = o4
+ end
+ write_result(result, oprot, 'exchange_partitions', seqid)
+ end
+
      def process_get_partition_with_auth(seqid, iprot, oprot)
        args = read_args(iprot, Get_partition_with_auth_args)
        result = Get_partition_with_auth_result.new()
@@ -5509,6 +5545,54 @@ module ThriftHiveMetastore
      ::Thrift::Struct.generate_accessors self
    end

+ class Exchange_partitions_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ PARTITIONSPECS = 1
+ SOURCE_DB = 2
+ SOURCE_TABLE_NAME = 3
+ DEST_DB = 4
+ DEST_TABLE_NAME = 5
+
+ FIELDS = {
+ PARTITIONSPECS => {:type => ::Thrift::Types::MAP, :name => 'partitionSpecs', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
+ SOURCE_DB => {:type => ::Thrift::Types::STRING, :name => 'source_db'},
+ SOURCE_TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'source_table_name'},
+ DEST_DB => {:type => ::Thrift::Types::STRING, :name => 'dest_db'},
+ DEST_TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'dest_table_name'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Exchange_partitions_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
+ O1 = 1
+ O2 = 2
+ O3 = 3
+ O4 = 4
+
+ FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException},
+ O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException},
+ O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::InvalidObjectException},
+ O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::InvalidInputException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
    class Get_partition_with_auth_args
      include ::Thrift::Struct, ::Thrift::Struct_Union
      DB_NAME = 1

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 2740e40..2e9afaf 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -2467,6 +2467,15 @@ public class HiveMetaStore extends ThriftHiveMetastore {
          String sourceDbName, String sourceTableName, String destDbName,
          String destTableName) throws MetaException, NoSuchObjectException,
          InvalidObjectException, InvalidInputException, TException {
+ exchange_partitions(partitionSpecs, sourceDbName, sourceTableName, destDbName, destTableName);
+ return new Partition();
+ }
+
+ @Override
+ public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+ String sourceDbName, String sourceTableName, String destDbName,
+ String destTableName) throws MetaException, NoSuchObjectException,
+ InvalidObjectException, InvalidInputException, TException {
        boolean success = false;
        boolean pathCreated = false;
        RawStore ms = getMS();
@@ -2501,6 +2510,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
        Path destPath = new Path(destinationTable.getSd().getLocation(),
            Warehouse.makePartName(partitionKeysPresent, partValsPresent));
        try {
+ List<Partition> destPartitions = new ArrayList<Partition>();
          for (Partition partition: partitionsToExchange) {
            Partition destPartition = new Partition(partition);
            destPartition.setDbName(destDbName);
@@ -2509,6 +2519,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
                Warehouse.makePartName(destinationTable.getPartitionKeys(), partition.getValues()));
            destPartition.getSd().setLocation(destPartitionPath.toString());
            ms.addPartition(destPartition);
+ destPartitions.add(destPartition);
            ms.dropPartition(partition.getDbName(), sourceTable.getTableName(),
              partition.getValues());
          }
@@ -2524,6 +2535,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           */
          pathCreated = wh.renameDir(sourcePath, destPath);
          success = ms.commitTransaction();
+ return destPartitions;
        } finally {
          if (!success || !pathCreated) {
            ms.rollbackTransaction();
@@ -2532,7 +2544,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
            }
          }
        }
- return new Partition();
      }

      private boolean drop_partition_common(RawStore ms, String db_name, String tbl_name,

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 3960f5d..f86ec45 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -659,6 +659,22 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
          destDb, destinationTableName);
    }

+ /**
+ * Exchange the partitions between two tables
+ * @param partitionSpecs partition specs of the parent partition to be exchanged
+ * @param sourceDb the db of the source table
+ * @param sourceTable the source table name
+ * @param destDb the db of the destination table
+ * @param destinationTableName the destination table name
+ * @return new partitions after exchanging
+ @Override
+ public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+ String sourceDb, String sourceTable, String destDb,
+ String destinationTableName) throws MetaException,
+ NoSuchObjectException, InvalidObjectException, TException {
+ return client.exchange_partitions(partitionSpecs, sourceDb, sourceTable,
+ destDb, destinationTableName);
+ }
+
    @Override
    public void validatePartitionNameCharacters(List<String> partVals)
        throws TException, MetaException {
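
As a quick orientation for the new client entry point above, here is a minimal, hypothetical usage sketch. The configuration, database and table names ("src_db", "src_table", "dst_db", "dst_table") are illustrative placeholders, not part of this patch, and error handling is reduced to a single throws clause.

  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;
  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.Partition;

  public class ExchangePartitionsExample {
    public static void main(String[] args) throws Exception {
      HiveConf conf = new HiveConf();
      HiveMetaStoreClient msc = new HiveMetaStoreClient(conf);
      try {
        // Only the partition keys being exchanged need to appear in the spec.
        Map<String, String> partitionSpecs = new HashMap<String, String>();
        partitionSpecs.put("year", "2015");

        // Placeholder database/table names; the call returns the new partitions
        // created under the destination table.
        List<Partition> newParts = msc.exchange_partitions(
            partitionSpecs, "src_db", "src_table", "dst_db", "dst_table");
        for (Partition p : newParts) {
          System.out.println("exchanged partition values: " + p.getValues());
        }
      } finally {
        msc.close();
      }
    }
  }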

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index f3a23f5..9279cf5 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -453,6 +453,22 @@ public interface IMetaStoreClient {
        InvalidObjectException, TException;

    /**
+ * A single partitionSpecs map can exchange multiple partitions. For example, with a
+ * partition layout of year/month/day, exchanging the partition year=2015 exchanges all
+ * of the partitions belonging to it. This function returns the list of affected partitions.
+ * @param partitionSpecs
+ * @param sourceDb
+ * @param sourceTable
+ * @param destdb
+ * @param destTableName
+ * @return the list of the new partitions
+ */
+ List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+ String sourceDb, String sourceTable, String destdb,
+ String destTableName) throws MetaException, NoSuchObjectException,
+ InvalidObjectException, TException;
+
+ /**
     * @param dbName
     * @param tblName
     * @param name - partition name i.e. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01'
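
The javadoc above describes the multi-partition behaviour of a partial spec. Continuing the hypothetical sketch from the HiveMetaStoreClient section (same placeholder names; msc is the connected client, and the assumed source-table layout is illustrative only):

  // Assume src_table is partitioned by (year, month, day) and contains, e.g.,
  // year=2015/month=1/day=1, year=2015/month=1/day=2 and year=2015/month=2/day=1.
  Map<String, String> spec = new HashMap<String, String>();
  spec.put("year", "2015");               // month and day are intentionally omitted

  List<Partition> exchanged = msc.exchange_partitions(
      spec, "src_db", "src_table", "dst_db", "dst_table");

  // Every leaf partition under year=2015 has been moved, so the returned list
  // holds one Partition per moved leaf (three in the layout assumed above).
  System.out.println("partitions exchanged: " + exchanged.size());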

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index ff86d6e..caf98b5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4203,9 +4203,20 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
      Map<String, String> partitionSpecs = exchangePartition.getPartitionSpecs();
      Table destTable = exchangePartition.getDestinationTable();
      Table sourceTable = exchangePartition.getSourceTable();
- db.exchangeTablePartitions(partitionSpecs, sourceTable.getDbName(),
+ List<Partition> partitions =
+ db.exchangeTablePartitions(partitionSpecs, sourceTable.getDbName(),
          sourceTable.getTableName(),destTable.getDbName(),
          destTable.getTableName());
+
+ for(Partition partition : partitions) {
+ // Reuse the partition specs from dest partition since they should be the same
+ work.getOutputs().add(new WriteEntity(new Partition(sourceTable, partition.getSpec(), null),
+ WriteEntity.WriteType.DELETE));
+
+ work.getOutputs().add(new WriteEntity(new Partition(destTable, partition.getSpec(), null),
+ WriteEntity.WriteType.INSERT));
+ }
+
      return 0;
    }


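The WriteEntity instances registered in the loop above (together with the table-level DDL_SHARED entities added in DDLSemanticAnalyzer further down) are what the pre/post execute hooks receive, which is the point of HIVE-12215. A rough sketch of a hook that simply logs them follows; the class name is hypothetical and it assumes Hive's PreExecute hook interface, registered via hive.exec.pre.hooks. A post-execute hook would additionally observe the partition-level entities added at execution time, as the updated q.out files below show.

  import java.util.Set;

  import org.apache.hadoop.hive.ql.hooks.PreExecute;
  import org.apache.hadoop.hive.ql.hooks.ReadEntity;
  import org.apache.hadoop.hive.ql.hooks.WriteEntity;
  import org.apache.hadoop.hive.ql.session.SessionState;
  import org.apache.hadoop.security.UserGroupInformation;

  public class LogOutputsHook implements PreExecute {
    @Override
    public void run(SessionState sess, Set<ReadEntity> inputs,
        Set<WriteEntity> outputs, UserGroupInformation ugi) throws Exception {
      // For ALTER TABLE ... EXCHANGE PARTITION this set now contains the two
      // tables (DDL_SHARED); the per-partition DELETE/INSERT entities are added
      // by DDLTask at execution time and reach post-execute hooks.
      for (WriteEntity output : outputs) {
        SessionState.getConsole().printInfo(
            "Output: " + output + " (" + output.getWriteType() + ")");
      }
    }
  }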
http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 9db740b..488d923 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -2091,7 +2091,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
        }
        List<org.apache.hadoop.hive.metastore.api.Partition> tParts = getMSC().dropPartitions(
            dbName, tblName, partExprs, dropOptions);
- return convertFromMetastore(tbl, tParts, null);
+ return convertFromMetastore(tbl, tParts);
      } catch (NoSuchObjectException e) {
        throw new HiveException("Partition or table doesn't exist.", e);
      } catch (Exception e) {
@@ -2335,22 +2335,20 @@ private void constructOneLBLocationMap(FileStatus fSta,

      List<org.apache.hadoop.hive.metastore.api.Partition> tParts = getMSC().listPartitionsByFilter(
          tbl.getDbName(), tbl.getTableName(), filter, (short)-1);
- return convertFromMetastore(tbl, tParts, null);
+ return convertFromMetastore(tbl, tParts);
    }

    private static List<Partition> convertFromMetastore(Table tbl,
- List<org.apache.hadoop.hive.metastore.api.Partition> src,
- List<Partition> dest) throws HiveException {
- if (src == null) {
- return dest;
+ List<org.apache.hadoop.hive.metastore.api.Partition> partitions) throws HiveException {
+ if (partitions == null) {
+ return new ArrayList<Partition>();
      }
- if (dest == null) {
- dest = new ArrayList<Partition>(src.size());
- }
- for (org.apache.hadoop.hive.metastore.api.Partition tPart : src) {
- dest.add(new Partition(tbl, tPart));
+
+ List<Partition> results = new ArrayList<Partition>(partitions.size());
+ for (org.apache.hadoop.hive.metastore.api.Partition tPart : partitions) {
+ results.add(new Partition(tbl, tPart));
      }
- return dest;
+ return results;
    }

    /**
@@ -2370,7 +2368,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
          new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>();
      boolean hasUnknownParts = getMSC().listPartitionsByExpr(tbl.getDbName(),
          tbl.getTableName(), exprBytes, defaultPartitionName, (short)-1, msParts);
- convertFromMetastore(tbl, msParts, result);
+ result.addAll(convertFromMetastore(tbl, msParts));
      return hasUnknownParts;
    }

@@ -3001,12 +2999,15 @@ private void constructOneLBLocationMap(FileStatus fSta,
      return ShimLoader.getMajorVersion().startsWith("0.20");
    }

- public void exchangeTablePartitions(Map<String, String> partitionSpecs,
+ public List<Partition> exchangeTablePartitions(Map<String, String> partitionSpecs,
        String sourceDb, String sourceTable, String destDb,
        String destinationTableName) throws HiveException {
      try {
- getMSC().exchange_partition(partitionSpecs, sourceDb, sourceTable, destDb,
+ List<org.apache.hadoop.hive.metastore.api.Partition> partitions =
+ getMSC().exchange_partitions(partitionSpecs, sourceDb, sourceTable, destDb,
          destinationTableName);
+
+ return convertFromMetastore(getTable(destDb, destinationTableName), partitions);
      } catch (Exception ex) {
        LOG.error(StringUtils.stringifyException(ex));
        throw new HiveException(ex);

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index b4546e1..eea2fcc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -19,6 +19,7 @@
  package org.apache.hadoop.hive.ql.parse;

  import com.google.common.collect.Lists;
+
  import org.antlr.runtime.tree.CommonTree;
  import org.antlr.runtime.tree.Tree;
  import org.slf4j.Logger;
@@ -49,6 +50,7 @@ import org.apache.hadoop.hive.ql.exec.TaskFactory;
  import org.apache.hadoop.hive.ql.exec.Utilities;
  import org.apache.hadoop.hive.ql.hooks.ReadEntity;
  import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
  import org.apache.hadoop.hive.ql.index.HiveIndex;
  import org.apache.hadoop.hive.ql.index.HiveIndex.IndexType;
  import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
@@ -718,6 +720,9 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
        new AlterTableExchangePartition(sourceTable, destTable, partSpecs);
      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
        alterTableExchangePartition), conf));
+
+ outputs.add(new WriteEntity(sourceTable, WriteType.DDL_SHARED));
+ outputs.add(new WriteEntity(destTable, WriteType.DDL_SHARED));
    }

    /**

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/test/results/clientnegative/exchange_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/exchange_partition.q.out b/ql/src/test/results/clientnegative/exchange_partition.q.out
index 8622615..f5e332a 100644
--- a/ql/src/test/results/clientnegative/exchange_partition.q.out
+++ b/ql/src/test/results/clientnegative/exchange_partition.q.out
@@ -51,4 +51,6 @@ POSTHOOK: Input: default@ex_table2
  part=part1
  PREHOOK: query: ALTER TABLE ex_table1 EXCHANGE PARTITION (part='part1') WITH TABLE ex_table2
  PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: default@ex_table1
+PREHOOK: Output: default@ex_table2
  FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Got exception: java.io.IOException Cannot rename the source path. The destination path already exists.)

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/test/results/clientpositive/exchange_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/exchange_partition.q.out b/ql/src/test/results/clientpositive/exchange_partition.q.out
index 5b21eaf..9316341 100644
--- a/ql/src/test/results/clientpositive/exchange_partition.q.out
+++ b/ql/src/test/results/clientpositive/exchange_partition.q.out
@@ -60,8 +60,14 @@ POSTHOOK: Input: ex2@exchange_part_test2
  ds=2013-04-05
  PREHOOK: query: ALTER TABLE ex1.exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE ex2.exchange_part_test2
  PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: ex1@exchange_part_test1
+PREHOOK: Output: ex2@exchange_part_test2
  POSTHOOK: query: ALTER TABLE ex1.exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE ex2.exchange_part_test2
  POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: Output: ex1@exchange_part_test1
+POSTHOOK: Output: ex1@exchange_part_test1@ds=2013-04-05
+POSTHOOK: Output: ex2@exchange_part_test2
+POSTHOOK: Output: ex2@exchange_part_test2@ds=2013-04-05
  PREHOOK: query: SHOW PARTITIONS ex1.exchange_part_test1
  PREHOOK: type: SHOWPARTITIONS
  PREHOOK: Input: ex1@exchange_part_test1

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/test/results/clientpositive/exchange_partition2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/exchange_partition2.q.out b/ql/src/test/results/clientpositive/exchange_partition2.q.out
index 8c7c583..05121d8 100644
--- a/ql/src/test/results/clientpositive/exchange_partition2.q.out
+++ b/ql/src/test/results/clientpositive/exchange_partition2.q.out
@@ -48,8 +48,14 @@ POSTHOOK: Input: default@exchange_part_test2
  ds=2013-04-05/hr=1
  PREHOOK: query: ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05', hr='1') WITH TABLE exchange_part_test2
  PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: default@exchange_part_test1
+PREHOOK: Output: default@exchange_part_test2
  POSTHOOK: query: ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05', hr='1') WITH TABLE exchange_part_test2
  POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: Output: default@exchange_part_test1
+POSTHOOK: Output: default@exchange_part_test1@ds=2013-04-05/hr=1
+POSTHOOK: Output: default@exchange_part_test2
+POSTHOOK: Output: default@exchange_part_test2@ds=2013-04-05/hr=1
  PREHOOK: query: SHOW PARTITIONS exchange_part_test1
  PREHOOK: type: SHOWPARTITIONS
  PREHOOK: Input: default@exchange_part_test1

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/test/results/clientpositive/exchange_partition3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/exchange_partition3.q.out b/ql/src/test/results/clientpositive/exchange_partition3.q.out
index 3815861..014be7c 100644
--- a/ql/src/test/results/clientpositive/exchange_partition3.q.out
+++ b/ql/src/test/results/clientpositive/exchange_partition3.q.out
@@ -65,9 +65,17 @@ ds=2013-04-05/hr=2
  PREHOOK: query: -- This will exchange both partitions hr=1 and hr=2
  ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2
  PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: default@exchange_part_test1
+PREHOOK: Output: default@exchange_part_test2
  POSTHOOK: query: -- This will exchange both partitions hr=1 and hr=2
  ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2
  POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: Output: default@exchange_part_test1
+POSTHOOK: Output: default@exchange_part_test1@ds=2013-04-05/hr=1
+POSTHOOK: Output: default@exchange_part_test1@ds=2013-04-05/hr=2
+POSTHOOK: Output: default@exchange_part_test2
+POSTHOOK: Output: default@exchange_part_test2@ds=2013-04-05/hr=1
+POSTHOOK: Output: default@exchange_part_test2@ds=2013-04-05/hr=2
  PREHOOK: query: SHOW PARTITIONS exchange_part_test1
  PREHOOK: type: SHOWPARTITIONS
  PREHOOK: Input: default@exchange_part_test1

http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/ql/src/test/results/clientpositive/exchgpartition2lel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/exchgpartition2lel.q.out b/ql/src/test/results/clientpositive/exchgpartition2lel.q.out
index 5997d6b..3fd996a 100644
--- a/ql/src/test/results/clientpositive/exchgpartition2lel.q.out
+++ b/ql/src/test/results/clientpositive/exchgpartition2lel.q.out
@@ -113,8 +113,14 @@ POSTHOOK: Input: default@t3@d1=1/d2=1
  100 1 1
  PREHOOK: query: ALTER TABLE t2 EXCHANGE PARTITION (d1 = 1) WITH TABLE t1
  PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: default@t1
+PREHOOK: Output: default@t2
  POSTHOOK: query: ALTER TABLE t2 EXCHANGE PARTITION (d1 = 1) WITH TABLE t1
  POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: Output: default@t1
+POSTHOOK: Output: default@t1@d1=1
+POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2@d1=1
  PREHOOK: query: SELECT * FROM t1
  PREHOOK: type: QUERY
  PREHOOK: Input: default@t1
@@ -136,8 +142,14 @@ POSTHOOK: Input: default@t2@d1=1
  100 1
  PREHOOK: query: ALTER TABLE t4 EXCHANGE PARTITION (d1 = 1, d2 = 1) WITH TABLE t3
  PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: default@t3
+PREHOOK: Output: default@t4
  POSTHOOK: query: ALTER TABLE t4 EXCHANGE PARTITION (d1 = 1, d2 = 1) WITH TABLE t3
  POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: Output: default@t3
+POSTHOOK: Output: default@t3@d1=1/d2=1
+POSTHOOK: Output: default@t4
+POSTHOOK: Output: default@t4@d1=1/d2=1
  PREHOOK: query: SELECT * FROM t3
  PREHOOK: type: QUERY
  PREHOOK: Input: default@t3
@@ -159,8 +171,14 @@ POSTHOOK: Input: default@t4@d1=1/d2=1
  100 1 1
  PREHOOK: query: ALTER TABLE t6 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5
  PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: Output: default@t5
+PREHOOK: Output: default@t6
  POSTHOOK: query: ALTER TABLE t6 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5
  POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: Output: default@t5
+POSTHOOK: Output: default@t5@d1=1/d2=1/d3=1
+POSTHOOK: Output: default@t6
+POSTHOOK: Output: default@t6@d1=1/d2=1/d3=1
  PREHOOK: query: SELECT * FROM t5
  PREHOOK: type: QUERY
  PREHOOK: Input: default@t5


  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-12215: Exchange partition does not show outputs field for post/pre execute hooks (Aihua Xu, reviewed by Xuefu Zhang)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/55a24f0a
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/55a24f0a
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/55a24f0a

    Branch: refs/heads/master-fixed
    Commit: 55a24f0a0da2e984cb59ab513b4d7b9cb7c3b2d8
    Parents: f4bac6a
    Author: Aihua Xu <aihuaxu@gmail.com>
    Authored: Mon Nov 2 09:21:38 2015 -0800
    Committer: Chao Sun <sunchao@apache.org>
    Committed: Mon Nov 2 09:21:38 2015 -0800

    ----------------------------------------------------------------------
      metastore/if/hive_metastore.thrift | 5 +
      .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp | 1752 +++++---
      .../gen/thrift/gen-cpp/ThriftHiveMetastore.h | 186 +
      .../ThriftHiveMetastore_server.skeleton.cpp | 5 +
      .../hive/metastore/api/ThriftHiveMetastore.java | 3987 +++++++++++++-----
      .../gen-php/metastore/ThriftHiveMetastore.php | 1144 +++--
      .../hive_metastore/ThriftHiveMetastore-remote | 7 +
      .../hive_metastore/ThriftHiveMetastore.py | 790 +++-
      .../gen/thrift/gen-rb/thrift_hive_metastore.rb | 84 +
      .../hadoop/hive/metastore/HiveMetaStore.java | 13 +-
      .../hive/metastore/HiveMetaStoreClient.java | 16 +
      .../hadoop/hive/metastore/IMetaStoreClient.java | 16 +
      .../org/apache/hadoop/hive/ql/exec/DDLTask.java | 13 +-
      .../apache/hadoop/hive/ql/metadata/Hive.java | 31 +-
      .../hive/ql/parse/DDLSemanticAnalyzer.java | 5 +
      .../clientnegative/exchange_partition.q.out | 2 +
      .../clientpositive/exchange_partition.q.out | 6 +
      .../clientpositive/exchange_partition2.q.out | 6 +
      .../clientpositive/exchange_partition3.q.out | 8 +
      .../clientpositive/exchgpartition2lel.q.out | 18 +
      20 files changed, 5902 insertions(+), 2192 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/if/hive_metastore.thrift
    ----------------------------------------------------------------------
    diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
    index 3e30f56..98fd42b 100755
    --- a/metastore/if/hive_metastore.thrift
    +++ b/metastore/if/hive_metastore.thrift
    @@ -988,6 +988,11 @@ service ThriftHiveMetastore extends fb303.FacebookService
            throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3,
            4:InvalidInputException o4)

    + list<Partition> exchange_partitions(1:map<string, string> partitionSpecs, 2:string source_db,
    + 3:string source_table_name, 4:string dest_db, 5:string dest_table_name)
    + throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3,
    + 4:InvalidInputException o4)
    +
        Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals,
            4: string user_name, 5: list<string> group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2)
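
    For completeness, a sketch of invoking the new service method through the generated Thrift client directly. The host/port, the plain TSocket transport, and the database/table names are assumptions; in practice HiveMetaStoreClient (shown earlier) wraps this call and handles transport and security.

      import java.util.HashMap;
      import java.util.List;
      import java.util.Map;

      import org.apache.hadoop.hive.metastore.api.Partition;
      import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
      import org.apache.thrift.protocol.TBinaryProtocol;
      import org.apache.thrift.transport.TSocket;
      import org.apache.thrift.transport.TTransport;

      public class RawThriftExchangeExample {
        public static void main(String[] args) throws Exception {
          // Assumed: an unsecured metastore listening on localhost:9083.
          TTransport transport = new TSocket("localhost", 9083);
          transport.open();
          ThriftHiveMetastore.Client client =
              new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

          Map<String, String> specs = new HashMap<String, String>();
          specs.put("ds", "2013-04-05");
          // Hypothetical database and table names.
          List<Partition> moved = client.exchange_partitions(
              specs, "src_db", "src_table", "dest_db", "dest_table");
          System.out.println("Exchanged " + moved.size() + " partition(s)");
          transport.close();
        }
      }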
  • Jxiang at Nov 6, 2015 at 5:32 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
    index 0c67416..3bc7e10 100644
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
    @@ -130,6 +130,8 @@ public class ThriftHiveMetastore {

          public Partition exchange_partition(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException;

    + public List<Partition> exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException;
    +
          public Partition get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

          public Partition get_partition_by_name(String db_name, String tbl_name, String part_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
    @@ -392,6 +394,8 @@ public class ThriftHiveMetastore {

          public void exchange_partition(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

    + public void exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
    +
          public void get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

          public void get_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
    @@ -1937,6 +1941,45 @@ public class ThriftHiveMetastore {
            throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "exchange_partition failed: unknown result");
          }

    + public List<Partition> exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException
    + {
    + send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name);
    + return recv_exchange_partitions();
    + }
    +
    + public void send_exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws org.apache.thrift.TException
    + {
    + exchange_partitions_args args = new exchange_partitions_args();
    + args.setPartitionSpecs(partitionSpecs);
    + args.setSource_db(source_db);
    + args.setSource_table_name(source_table_name);
    + args.setDest_db(dest_db);
    + args.setDest_table_name(dest_table_name);
    + sendBase("exchange_partitions", args);
    + }
    +
    + public List<Partition> recv_exchange_partitions() throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException
    + {
    + exchange_partitions_result result = new exchange_partitions_result();
    + receiveBase(result, "exchange_partitions");
    + if (result.isSetSuccess()) {
    + return result.success;
    + }
    + if (result.o1 != null) {
    + throw result.o1;
    + }
    + if (result.o2 != null) {
    + throw result.o2;
    + }
    + if (result.o3 != null) {
    + throw result.o3;
    + }
    + if (result.o4 != null) {
    + throw result.o4;
    + }
    + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "exchange_partitions failed: unknown result");
    + }
    +
          public Partition get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
          {
            send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names);
    @@ -5953,6 +5996,50 @@ public class ThriftHiveMetastore {
            }
          }

    + public void exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
    + checkReady();
    + exchange_partitions_call method_call = new exchange_partitions_call(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, resultHandler, this, ___protocolFactory, ___transport);
    + this.___currentMethod = method_call;
    + ___manager.call(method_call);
    + }
    +
    + public static class exchange_partitions_call extends org.apache.thrift.async.TAsyncMethodCall {
    + private Map<String,String> partitionSpecs;
    + private String source_db;
    + private String source_table_name;
    + private String dest_db;
    + private String dest_table_name;
    + public exchange_partitions_call(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
    + super(client, protocolFactory, transport, resultHandler, false);
    + this.partitionSpecs = partitionSpecs;
    + this.source_db = source_db;
    + this.source_table_name = source_table_name;
    + this.dest_db = dest_db;
    + this.dest_table_name = dest_table_name;
    + }
    +
    + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
    + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("exchange_partitions", org.apache.thrift.protocol.TMessageType.CALL, 0));
    + exchange_partitions_args args = new exchange_partitions_args();
    + args.setPartitionSpecs(partitionSpecs);
    + args.setSource_db(source_db);
    + args.setSource_table_name(source_table_name);
    + args.setDest_db(dest_db);
    + args.setDest_table_name(dest_table_name);
    + args.write(prot);
    + prot.writeMessageEnd();
    + }
    +
    + public List<Partition> getResult() throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException {
    + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
    + throw new IllegalStateException("Method call not finished!");
    + }
    + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
    + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
    + return (new Client(prot)).recv_exchange_partitions();
    + }
    + }
    +
          public void get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
            checkReady();
            get_partition_with_auth_call method_call = new get_partition_with_auth_call(db_name, tbl_name, part_vals, user_name, group_names, resultHandler, this, ___protocolFactory, ___transport);
    @@ -8997,6 +9084,7 @@ public class ThriftHiveMetastore {
            processMap.put("drop_partitions_req", new drop_partitions_req());
            processMap.put("get_partition", new get_partition());
            processMap.put("exchange_partition", new exchange_partition());
    + processMap.put("exchange_partitions", new exchange_partitions());
            processMap.put("get_partition_with_auth", new get_partition_with_auth());
            processMap.put("get_partition_by_name", new get_partition_by_name());
            processMap.put("get_partitions", new get_partitions());
    @@ -10271,6 +10359,36 @@ public class ThriftHiveMetastore {
            }
          }

    + public static class exchange_partitions<I extends Iface> extends org.apache.thrift.ProcessFunction<I, exchange_partitions_args> {
    + public exchange_partitions() {
    + super("exchange_partitions");
    + }
    +
    + public exchange_partitions_args getEmptyArgsInstance() {
    + return new exchange_partitions_args();
    + }
    +
    + protected boolean isOneway() {
    + return false;
    + }
    +
    + public exchange_partitions_result getResult(I iface, exchange_partitions_args args) throws org.apache.thrift.TException {
    + exchange_partitions_result result = new exchange_partitions_result();
    + try {
    + result.success = iface.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name);
    + } catch (MetaException o1) {
    + result.o1 = o1;
    + } catch (NoSuchObjectException o2) {
    + result.o2 = o2;
    + } catch (InvalidObjectException o3) {
    + result.o3 = o3;
    + } catch (InvalidInputException o4) {
    + result.o4 = o4;
    + }
    + return result;
    + }
    + }
    +
          public static class get_partition_with_auth<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_partition_with_auth_args> {
            public get_partition_with_auth() {
              super("get_partition_with_auth");
    @@ -12463,6 +12581,7 @@ public class ThriftHiveMetastore {
            processMap.put("drop_partitions_req", new drop_partitions_req());
            processMap.put("get_partition", new get_partition());
            processMap.put("exchange_partition", new exchange_partition());
    + processMap.put("exchange_partitions", new exchange_partitions());
            processMap.put("get_partition_with_auth", new get_partition_with_auth());
            processMap.put("get_partition_by_name", new get_partition_by_name());
            processMap.put("get_partitions", new get_partitions());
    @@ -15361,20 +15480,20 @@ public class ThriftHiveMetastore {
            }
          }

    - public static class get_partition_with_auth<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_with_auth_args, Partition> {
    - public get_partition_with_auth() {
    - super("get_partition_with_auth");
    + public static class exchange_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, exchange_partitions_args, List<Partition>> {
    + public exchange_partitions() {
    + super("exchange_partitions");
            }

    - public get_partition_with_auth_args getEmptyArgsInstance() {
    - return new get_partition_with_auth_args();
    + public exchange_partitions_args getEmptyArgsInstance() {
    + return new exchange_partitions_args();
            }

    - public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Partition>() {
    - public void onComplete(Partition o) {
    - get_partition_with_auth_result result = new get_partition_with_auth_result();
    + return new AsyncMethodCallback<List<Partition>>() {
    + public void onComplete(List<Partition> o) {
    + exchange_partitions_result result = new exchange_partitions_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -15387,7 +15506,7 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - get_partition_with_auth_result result = new get_partition_with_auth_result();
    + exchange_partitions_result result = new exchange_partitions_result();
                  if (e instanceof MetaException) {
                              result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
    @@ -15398,6 +15517,16 @@ public class ThriftHiveMetastore {
                              result.setO2IsSet(true);
                              msg = result;
                  }
    + else if (e instanceof InvalidObjectException) {
    + result.o3 = (InvalidObjectException) e;
    + result.setO3IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof InvalidInputException) {
    + result.o4 = (InvalidInputException) e;
    + result.setO4IsSet(true);
    + msg = result;
    + }
                   else
                  {
                    msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    @@ -15418,25 +15547,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, get_partition_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    - iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names,resultHandler);
    + public void start(I iface, exchange_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
    + iface.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name,resultHandler);
            }
          }

    - public static class get_partition_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_by_name_args, Partition> {
    - public get_partition_by_name() {
    - super("get_partition_by_name");
    + public static class get_partition_with_auth<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_with_auth_args, Partition> {
    + public get_partition_with_auth() {
    + super("get_partition_with_auth");
            }

    - public get_partition_by_name_args getEmptyArgsInstance() {
    - return new get_partition_by_name_args();
    + public get_partition_with_auth_args getEmptyArgsInstance() {
    + return new get_partition_with_auth_args();
            }

            public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
              return new AsyncMethodCallback<Partition>() {
                public void onComplete(Partition o) {
    - get_partition_by_name_result result = new get_partition_by_name_result();
    + get_partition_with_auth_result result = new get_partition_with_auth_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -15449,7 +15578,7 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - get_partition_by_name_result result = new get_partition_by_name_result();
    + get_partition_with_auth_result result = new get_partition_with_auth_result();
                  if (e instanceof MetaException) {
                              result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
    @@ -15480,25 +15609,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, get_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    - iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler);
    + public void start(I iface, get_partition_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    + iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names,resultHandler);
            }
          }

    - public static class get_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_args, List<Partition>> {
    - public get_partitions() {
    - super("get_partitions");
    + public static class get_partition_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_by_name_args, Partition> {
    + public get_partition_by_name() {
    + super("get_partition_by_name");
            }

    - public get_partitions_args getEmptyArgsInstance() {
    - return new get_partitions_args();
    + public get_partition_by_name_args getEmptyArgsInstance() {
    + return new get_partition_by_name_args();
            }

    - public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<List<Partition>>() {
    - public void onComplete(List<Partition> o) {
    - get_partitions_result result = new get_partitions_result();
    + return new AsyncMethodCallback<Partition>() {
    + public void onComplete(Partition o) {
    + get_partition_by_name_result result = new get_partition_by_name_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -15511,14 +15640,14 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - get_partitions_result result = new get_partitions_result();
    - if (e instanceof NoSuchObjectException) {
    - result.o1 = (NoSuchObjectException) e;
    + get_partition_by_name_result result = new get_partition_by_name_result();
    + if (e instanceof MetaException) {
    + result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof MetaException) {
    - result.o2 = (MetaException) e;
    + else if (e instanceof NoSuchObjectException) {
    + result.o2 = (NoSuchObjectException) e;
                              result.setO2IsSet(true);
                              msg = result;
                  }
    @@ -15542,87 +15671,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, get_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
    - iface.get_partitions(args.db_name, args.tbl_name, args.max_parts,resultHandler);
    + public void start(I iface, get_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
    + iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler);
            }
          }

    - public static class get_partitions_with_auth<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_with_auth_args, List<Partition>> {
    - public get_partitions_with_auth() {
    - super("get_partitions_with_auth");
    + public static class get_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_args, List<Partition>> {
    + public get_partitions() {
    + super("get_partitions");
            }

    - public get_partitions_with_auth_args getEmptyArgsInstance() {
    - return new get_partitions_with_auth_args();
    + public get_partitions_args getEmptyArgsInstance() {
    + return new get_partitions_args();
            }

            public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
              return new AsyncMethodCallback<List<Partition>>() {
                public void onComplete(List<Partition> o) {
    - get_partitions_with_auth_result result = new get_partitions_with_auth_result();
    - result.success = o;
    - try {
    - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    - return;
    - } catch (Exception e) {
    - LOGGER.error("Exception writing to internal frame buffer", e);
    - }
    - fb.close();
    - }
    - public void onError(Exception e) {
    - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    - org.apache.thrift.TBase msg;
    - get_partitions_with_auth_result result = new get_partitions_with_auth_result();
    - if (e instanceof NoSuchObjectException) {
    - result.o1 = (NoSuchObjectException) e;
    - result.setO1IsSet(true);
    - msg = result;
    - }
    - else if (e instanceof MetaException) {
    - result.o2 = (MetaException) e;
    - result.setO2IsSet(true);
    - msg = result;
    - }
    - else
    - {
    - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    - }
    - try {
    - fcall.sendResponse(fb,msg,msgType,seqid);
    - return;
    - } catch (Exception ex) {
    - LOGGER.error("Exception writing to internal frame buffer", ex);
    - }
    - fb.close();
    - }
    - };
    - }
    -
    - protected boolean isOneway() {
    - return false;
    - }
    -
    - public void start(I iface, get_partitions_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
    - iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names,resultHandler);
    - }
    - }
    -
    - public static class get_partitions_pspec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_pspec_args, List<PartitionSpec>> {
    - public get_partitions_pspec() {
    - super("get_partitions_pspec");
    - }
    -
    - public get_partitions_pspec_args getEmptyArgsInstance() {
    - return new get_partitions_pspec_args();
    - }
    -
    - public AsyncMethodCallback<List<PartitionSpec>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    - final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<List<PartitionSpec>>() {
    - public void onComplete(List<PartitionSpec> o) {
    - get_partitions_pspec_result result = new get_partitions_pspec_result();
    + get_partitions_result result = new get_partitions_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -15635,7 +15702,7 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - get_partitions_pspec_result result = new get_partitions_pspec_result();
    + get_partitions_result result = new get_partitions_result();
                  if (e instanceof NoSuchObjectException) {
                              result.o1 = (NoSuchObjectException) e;
                              result.setO1IsSet(true);
    @@ -15666,144 +15733,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, get_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback<List<PartitionSpec>> resultHandler) throws TException {
    - iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts,resultHandler);
    - }
    - }
    -
    - public static class get_partition_names<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_names_args, List<String>> {
    - public get_partition_names() {
    - super("get_partition_names");
    - }
    -
    - public get_partition_names_args getEmptyArgsInstance() {
    - return new get_partition_names_args();
    - }
    -
    - public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    - final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<List<String>>() {
    - public void onComplete(List<String> o) {
    - get_partition_names_result result = new get_partition_names_result();
    - result.success = o;
    - try {
    - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    - return;
    - } catch (Exception e) {
    - LOGGER.error("Exception writing to internal frame buffer", e);
    - }
    - fb.close();
    - }
    - public void onError(Exception e) {
    - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    - org.apache.thrift.TBase msg;
    - get_partition_names_result result = new get_partition_names_result();
    - if (e instanceof MetaException) {
    - result.o2 = (MetaException) e;
    - result.setO2IsSet(true);
    - msg = result;
    - }
    - else
    - {
    - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    - }
    - try {
    - fcall.sendResponse(fb,msg,msgType,seqid);
    - return;
    - } catch (Exception ex) {
    - LOGGER.error("Exception writing to internal frame buffer", ex);
    - }
    - fb.close();
    - }
    - };
    - }
    -
    - protected boolean isOneway() {
    - return false;
    - }
    -
    - public void start(I iface, get_partition_names_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
    - iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts,resultHandler);
    - }
    - }
    -
    - public static class get_partitions_ps<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_ps_args, List<Partition>> {
    - public get_partitions_ps() {
    - super("get_partitions_ps");
    - }
    -
    - public get_partitions_ps_args getEmptyArgsInstance() {
    - return new get_partitions_ps_args();
    - }
    -
    - public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    - final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<List<Partition>>() {
    - public void onComplete(List<Partition> o) {
    - get_partitions_ps_result result = new get_partitions_ps_result();
    - result.success = o;
    - try {
    - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    - return;
    - } catch (Exception e) {
    - LOGGER.error("Exception writing to internal frame buffer", e);
    - }
    - fb.close();
    - }
    - public void onError(Exception e) {
    - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    - org.apache.thrift.TBase msg;
    - get_partitions_ps_result result = new get_partitions_ps_result();
    - if (e instanceof MetaException) {
    - result.o1 = (MetaException) e;
    - result.setO1IsSet(true);
    - msg = result;
    - }
    - else if (e instanceof NoSuchObjectException) {
    - result.o2 = (NoSuchObjectException) e;
    - result.setO2IsSet(true);
    - msg = result;
    - }
    - else
    - {
    - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    - }
    - try {
    - fcall.sendResponse(fb,msg,msgType,seqid);
    - return;
    - } catch (Exception ex) {
    - LOGGER.error("Exception writing to internal frame buffer", ex);
    - }
    - fb.close();
    - }
    - };
    - }
    -
    - protected boolean isOneway() {
    - return false;
    - }
    -
    - public void start(I iface, get_partitions_ps_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
    - iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler);
    + public void start(I iface, get_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
    + iface.get_partitions(args.db_name, args.tbl_name, args.max_parts,resultHandler);
            }
          }

    - public static class get_partitions_ps_with_auth<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_ps_with_auth_args, List<Partition>> {
    - public get_partitions_ps_with_auth() {
    - super("get_partitions_ps_with_auth");
    + public static class get_partitions_with_auth<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_with_auth_args, List<Partition>> {
    + public get_partitions_with_auth() {
    + super("get_partitions_with_auth");
            }

    - public get_partitions_ps_with_auth_args getEmptyArgsInstance() {
    - return new get_partitions_ps_with_auth_args();
    + public get_partitions_with_auth_args getEmptyArgsInstance() {
    + return new get_partitions_with_auth_args();
            }

            public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
              return new AsyncMethodCallback<List<Partition>>() {
                public void onComplete(List<Partition> o) {
    - get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result();
    + get_partitions_with_auth_result result = new get_partitions_with_auth_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -15816,7 +15764,7 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result();
    + get_partitions_with_auth_result result = new get_partitions_with_auth_result();
                  if (e instanceof NoSuchObjectException) {
                              result.o1 = (NoSuchObjectException) e;
                              result.setO1IsSet(true);
    @@ -15847,25 +15795,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, get_partitions_ps_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
    - iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names,resultHandler);
    + public void start(I iface, get_partitions_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
    + iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names,resultHandler);
            }
          }

    - public static class get_partition_names_ps<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_names_ps_args, List<String>> {
    - public get_partition_names_ps() {
    - super("get_partition_names_ps");
    + public static class get_partitions_pspec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_pspec_args, List<PartitionSpec>> {
    + public get_partitions_pspec() {
    + super("get_partitions_pspec");
            }

    - public get_partition_names_ps_args getEmptyArgsInstance() {
    - return new get_partition_names_ps_args();
    + public get_partitions_pspec_args getEmptyArgsInstance() {
    + return new get_partitions_pspec_args();
            }

    - public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<List<PartitionSpec>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<List<String>>() {
    - public void onComplete(List<String> o) {
    - get_partition_names_ps_result result = new get_partition_names_ps_result();
    + return new AsyncMethodCallback<List<PartitionSpec>>() {
    + public void onComplete(List<PartitionSpec> o) {
    + get_partitions_pspec_result result = new get_partitions_pspec_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -15878,14 +15826,14 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - get_partition_names_ps_result result = new get_partition_names_ps_result();
    - if (e instanceof MetaException) {
    - result.o1 = (MetaException) e;
    + get_partitions_pspec_result result = new get_partitions_pspec_result();
    + if (e instanceof NoSuchObjectException) {
    + result.o1 = (NoSuchObjectException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof NoSuchObjectException) {
    - result.o2 = (NoSuchObjectException) e;
    + else if (e instanceof MetaException) {
    + result.o2 = (MetaException) e;
                              result.setO2IsSet(true);
                              msg = result;
                  }
    @@ -15909,25 +15857,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, get_partition_names_ps_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
    - iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler);
    + public void start(I iface, get_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback<List<PartitionSpec>> resultHandler) throws TException {
    + iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts,resultHandler);
            }
          }

    - public static class get_partitions_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_by_filter_args, List<Partition>> {
    - public get_partitions_by_filter() {
    - super("get_partitions_by_filter");
    + public static class get_partition_names<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_names_args, List<String>> {
    + public get_partition_names() {
    + super("get_partition_names");
            }

    - public get_partitions_by_filter_args getEmptyArgsInstance() {
    - return new get_partitions_by_filter_args();
    + public get_partition_names_args getEmptyArgsInstance() {
    + return new get_partition_names_args();
            }

    - public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<List<Partition>>() {
    - public void onComplete(List<Partition> o) {
    - get_partitions_by_filter_result result = new get_partitions_by_filter_result();
    + return new AsyncMethodCallback<List<String>>() {
    + public void onComplete(List<String> o) {
    + get_partition_names_result result = new get_partition_names_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -15940,14 +15888,9 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - get_partitions_by_filter_result result = new get_partitions_by_filter_result();
    + get_partition_names_result result = new get_partition_names_result();
                  if (e instanceof MetaException) {
    - result.o1 = (MetaException) e;
    - result.setO1IsSet(true);
    - msg = result;
    - }
    - else if (e instanceof NoSuchObjectException) {
    - result.o2 = (NoSuchObjectException) e;
    + result.o2 = (MetaException) e;
                              result.setO2IsSet(true);
                              msg = result;
                  }
    @@ -15971,25 +15914,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, get_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
    - iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler);
    + public void start(I iface, get_partition_names_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
    + iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts,resultHandler);
            }
          }

    - public static class get_part_specs_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_part_specs_by_filter_args, List<PartitionSpec>> {
    - public get_part_specs_by_filter() {
    - super("get_part_specs_by_filter");
    + public static class get_partitions_ps<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_ps_args, List<Partition>> {
    + public get_partitions_ps() {
    + super("get_partitions_ps");
            }

    - public get_part_specs_by_filter_args getEmptyArgsInstance() {
    - return new get_part_specs_by_filter_args();
    + public get_partitions_ps_args getEmptyArgsInstance() {
    + return new get_partitions_ps_args();
            }

    - public AsyncMethodCallback<List<PartitionSpec>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<List<PartitionSpec>>() {
    - public void onComplete(List<PartitionSpec> o) {
    - get_part_specs_by_filter_result result = new get_part_specs_by_filter_result();
    + return new AsyncMethodCallback<List<Partition>>() {
    + public void onComplete(List<Partition> o) {
    + get_partitions_ps_result result = new get_partitions_ps_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -16002,7 +15945,7 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - get_part_specs_by_filter_result result = new get_part_specs_by_filter_result();
    + get_partitions_ps_result result = new get_partitions_ps_result();
                  if (e instanceof MetaException) {
                              result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
    @@ -16033,25 +15976,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, get_part_specs_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<PartitionSpec>> resultHandler) throws TException {
    - iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler);
    + public void start(I iface, get_partitions_ps_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
    + iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler);
            }
          }

    - public static class get_partitions_by_expr<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_by_expr_args, PartitionsByExprResult> {
    - public get_partitions_by_expr() {
    - super("get_partitions_by_expr");
    + public static class get_partitions_ps_with_auth<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_ps_with_auth_args, List<Partition>> {
    + public get_partitions_ps_with_auth() {
    + super("get_partitions_ps_with_auth");
            }

    - public get_partitions_by_expr_args getEmptyArgsInstance() {
    - return new get_partitions_by_expr_args();
    + public get_partitions_ps_with_auth_args getEmptyArgsInstance() {
    + return new get_partitions_ps_with_auth_args();
            }

    - public AsyncMethodCallback<PartitionsByExprResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<PartitionsByExprResult>() {
    - public void onComplete(PartitionsByExprResult o) {
    - get_partitions_by_expr_result result = new get_partitions_by_expr_result();
    + return new AsyncMethodCallback<List<Partition>>() {
    + public void onComplete(List<Partition> o) {
    + get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -16064,14 +16007,14 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - get_partitions_by_expr_result result = new get_partitions_by_expr_result();
    - if (e instanceof MetaException) {
    - result.o1 = (MetaException) e;
    + get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result();
    + if (e instanceof NoSuchObjectException) {
    + result.o1 = (NoSuchObjectException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof NoSuchObjectException) {
    - result.o2 = (NoSuchObjectException) e;
    + else if (e instanceof MetaException) {
    + result.o2 = (MetaException) e;
                              result.setO2IsSet(true);
                              msg = result;
                  }
    @@ -16095,25 +16038,25 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, get_partitions_by_expr_args args, org.apache.thrift.async.AsyncMethodCallback<PartitionsByExprResult> resultHandler) throws TException {
    - iface.get_partitions_by_expr(args.req,resultHandler);
    + public void start(I iface, get_partitions_ps_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
    + iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names,resultHandler);
            }
          }

    - public static class get_partitions_by_names<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_by_names_args, List<Partition>> {
    - public get_partitions_by_names() {
    - super("get_partitions_by_names");
    + public static class get_partition_names_ps<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partition_names_ps_args, List<String>> {
    + public get_partition_names_ps() {
    + super("get_partition_names_ps");
            }

    - public get_partitions_by_names_args getEmptyArgsInstance() {
    - return new get_partitions_by_names_args();
    + public get_partition_names_ps_args getEmptyArgsInstance() {
    + return new get_partition_names_ps_args();
            }

    - public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<List<Partition>>() {
    - public void onComplete(List<Partition> o) {
    - get_partitions_by_names_result result = new get_partitions_by_names_result();
    + return new AsyncMethodCallback<List<String>>() {
    + public void onComplete(List<String> o) {
    + get_partition_names_ps_result result = new get_partition_names_ps_result();
                  result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    @@ -16126,7 +16069,7 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - get_partitions_by_names_result result = new get_partitions_by_names_result();
    + get_partition_names_ps_result result = new get_partition_names_ps_result();
                  if (e instanceof MetaException) {
                              result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
    @@ -16157,25 +16100,26 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, get_partitions_by_names_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
    - iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names,resultHandler);
    + public void start(I iface, get_partition_names_ps_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
    + iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler);
            }
          }

    - public static class alter_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partition_args, Void> {
    - public alter_partition() {
    - super("alter_partition");
    + public static class get_partitions_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_by_filter_args, List<Partition>> {
    + public get_partitions_by_filter() {
    + super("get_partitions_by_filter");
            }

    - public alter_partition_args getEmptyArgsInstance() {
    - return new alter_partition_args();
    + public get_partitions_by_filter_args getEmptyArgsInstance() {
    + return new get_partitions_by_filter_args();
            }

    - public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Void>() {
    - public void onComplete(Void o) {
    - alter_partition_result result = new alter_partition_result();
    + return new AsyncMethodCallback<List<Partition>>() {
    + public void onComplete(List<Partition> o) {
    + get_partitions_by_filter_result result = new get_partitions_by_filter_result();
    + result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -16187,14 +16131,14 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - alter_partition_result result = new alter_partition_result();
    - if (e instanceof InvalidOperationException) {
    - result.o1 = (InvalidOperationException) e;
    + get_partitions_by_filter_result result = new get_partitions_by_filter_result();
    + if (e instanceof MetaException) {
    + result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof MetaException) {
    - result.o2 = (MetaException) e;
    + else if (e instanceof NoSuchObjectException) {
    + result.o2 = (NoSuchObjectException) e;
                              result.setO2IsSet(true);
                              msg = result;
                  }
    @@ -16218,25 +16162,26 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, alter_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    - iface.alter_partition(args.db_name, args.tbl_name, args.new_part,resultHandler);
    + public void start(I iface, get_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
    + iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler);
            }
          }

    - public static class alter_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partitions_args, Void> {
    - public alter_partitions() {
    - super("alter_partitions");
    + public static class get_part_specs_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_part_specs_by_filter_args, List<PartitionSpec>> {
    + public get_part_specs_by_filter() {
    + super("get_part_specs_by_filter");
            }

    - public alter_partitions_args getEmptyArgsInstance() {
    - return new alter_partitions_args();
    + public get_part_specs_by_filter_args getEmptyArgsInstance() {
    + return new get_part_specs_by_filter_args();
            }

    - public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<List<PartitionSpec>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Void>() {
    - public void onComplete(Void o) {
    - alter_partitions_result result = new alter_partitions_result();
    + return new AsyncMethodCallback<List<PartitionSpec>>() {
    + public void onComplete(List<PartitionSpec> o) {
    + get_part_specs_by_filter_result result = new get_part_specs_by_filter_result();
    + result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -16248,14 +16193,14 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - alter_partitions_result result = new alter_partitions_result();
    - if (e instanceof InvalidOperationException) {
    - result.o1 = (InvalidOperationException) e;
    + get_part_specs_by_filter_result result = new get_part_specs_by_filter_result();
    + if (e instanceof MetaException) {
    + result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof MetaException) {
    - result.o2 = (MetaException) e;
    + else if (e instanceof NoSuchObjectException) {
    + result.o2 = (NoSuchObjectException) e;
                              result.setO2IsSet(true);
                              msg = result;
                  }
    @@ -16279,25 +16224,26 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, alter_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    - iface.alter_partitions(args.db_name, args.tbl_name, args.new_parts,resultHandler);
    + public void start(I iface, get_part_specs_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<PartitionSpec>> resultHandler) throws TException {
    + iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler);
            }
          }

    - public static class alter_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partition_with_environment_context_args, Void> {
    - public alter_partition_with_environment_context() {
    - super("alter_partition_with_environment_context");
    + public static class get_partitions_by_expr<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_by_expr_args, PartitionsByExprResult> {
    + public get_partitions_by_expr() {
    + super("get_partitions_by_expr");
            }

    - public alter_partition_with_environment_context_args getEmptyArgsInstance() {
    - return new alter_partition_with_environment_context_args();
    + public get_partitions_by_expr_args getEmptyArgsInstance() {
    + return new get_partitions_by_expr_args();
            }

    - public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<PartitionsByExprResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Void>() {
    - public void onComplete(Void o) {
    - alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result();
    + return new AsyncMethodCallback<PartitionsByExprResult>() {
    + public void onComplete(PartitionsByExprResult o) {
    + get_partitions_by_expr_result result = new get_partitions_by_expr_result();
    + result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -16309,14 +16255,14 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result();
    - if (e instanceof InvalidOperationException) {
    - result.o1 = (InvalidOperationException) e;
    + get_partitions_by_expr_result result = new get_partitions_by_expr_result();
    + if (e instanceof MetaException) {
    + result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof MetaException) {
    - result.o2 = (MetaException) e;
    + else if (e instanceof NoSuchObjectException) {
    + result.o2 = (NoSuchObjectException) e;
                              result.setO2IsSet(true);
                              msg = result;
                  }
    @@ -16340,25 +16286,26 @@ public class ThriftHiveMetastore {
              return false;
            }

    - public void start(I iface, alter_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    - iface.alter_partition_with_environment_context(args.db_name, args.tbl_name, args.new_part, args.environment_context,resultHandler);
    + public void start(I iface, get_partitions_by_expr_args args, org.apache.thrift.async.AsyncMethodCallback<PartitionsByExprResult> resultHandler) throws TException {
    + iface.get_partitions_by_expr(args.req,resultHandler);
            }
          }

    - public static class rename_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, rename_partition_args, Void> {
    - public rename_partition() {
    - super("rename_partition");
    + public static class get_partitions_by_names<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_partitions_by_names_args, List<Partition>> {
    + public get_partitions_by_names() {
    + super("get_partitions_by_names");
            }

    - public rename_partition_args getEmptyArgsInstance() {
    - return new rename_partition_args();
    + public get_partitions_by_names_args getEmptyArgsInstance() {
    + return new get_partitions_by_names_args();
            }

    - public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + public AsyncMethodCallback<List<Partition>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
              final org.apache.thrift.AsyncProcessFunction fcall = this;
    - return new AsyncMethodCallback<Void>() {
    - public void onComplete(Void o) {
    - rename_partition_result result = new rename_partition_result();
    + return new AsyncMethodCallback<List<Partition>>() {
    + public void onComplete(List<Partition> o) {
    + get_partitions_by_names_result result = new get_partitions_by_names_result();
    + result.success = o;
                  try {
                    fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
                    return;
    @@ -16370,14 +16317,258 @@ public class ThriftHiveMetastore {
                public void onError(Exception e) {
                  byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                  org.apache.thrift.TBase msg;
    - rename_partition_result result = new rename_partition_result();
    - if (e instanceof InvalidOperationException) {
    - result.o1 = (InvalidOperationException) e;
    + get_partitions_by_names_result result = new get_partitions_by_names_result();
    + if (e instanceof MetaException) {
    + result.o1 = (MetaException) e;
                              result.setO1IsSet(true);
                              msg = result;
                  }
    - else if (e instanceof MetaException) {
    - result.o2 = (MetaException) e;
    + else if (e instanceof NoSuchObjectException) {
    + result.o2 = (NoSuchObjectException) e;
    + result.setO2IsSet(true);
    + msg = result;
    + }
    + else
    + {
    + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    + }
    + try {
    + fcall.sendResponse(fb,msg,msgType,seqid);
    + return;
    + } catch (Exception ex) {
    + LOGGER.error("Exception writing to internal frame buffer", ex);
    + }
    + fb.close();
    + }
    + };
    + }
    +
    + protected boolean isOneway() {
    + return false;
    + }
    +
    + public void start(I iface, get_partitions_by_names_args args, org.apache.thrift.async.AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
    + iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names,resultHandler);
    + }
    + }
    +
    + public static class alter_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partition_args, Void> {
    + public alter_partition() {
    + super("alter_partition");
    + }
    +
    + public alter_partition_args getEmptyArgsInstance() {
    + return new alter_partition_args();
    + }
    +
    + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + final org.apache.thrift.AsyncProcessFunction fcall = this;
    + return new AsyncMethodCallback<Void>() {
    + public void onComplete(Void o) {
    + alter_partition_result result = new alter_partition_result();
    + try {
    + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    + return;
    + } catch (Exception e) {
    + LOGGER.error("Exception writing to internal frame buffer", e);
    + }
    + fb.close();
    + }
    + public void onError(Exception e) {
    + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    + org.apache.thrift.TBase msg;
    + alter_partition_result result = new alter_partition_result();
    + if (e instanceof InvalidOperationException) {
    + result.o1 = (InvalidOperationException) e;
    + result.setO1IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof MetaException) {
    + result.o2 = (MetaException) e;
    + result.setO2IsSet(true);
    + msg = result;
    + }
    + else
    + {
    + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    + }
    + try {
    + fcall.sendResponse(fb,msg,msgType,seqid);
    + return;
    + } catch (Exception ex) {
    + LOGGER.error("Exception writing to internal frame buffer", ex);
    + }
    + fb.close();
    + }
    + };
    + }
    +
    + protected boolean isOneway() {
    + return false;
    + }
    +
    + public void start(I iface, alter_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    + iface.alter_partition(args.db_name, args.tbl_name, args.new_part,resultHandler);
    + }
    + }
    +
    + public static class alter_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partitions_args, Void> {
    + public alter_partitions() {
    + super("alter_partitions");
    + }
    +
    + public alter_partitions_args getEmptyArgsInstance() {
    + return new alter_partitions_args();
    + }
    +
    + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + final org.apache.thrift.AsyncProcessFunction fcall = this;
    + return new AsyncMethodCallback<Void>() {
    + public void onComplete(Void o) {
    + alter_partitions_result result = new alter_partitions_result();
    + try {
    + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    + return;
    + } catch (Exception e) {
    + LOGGER.error("Exception writing to internal frame buffer", e);
    + }
    + fb.close();
    + }
    + public void onError(Exception e) {
    + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    + org.apache.thrift.TBase msg;
    + alter_partitions_result result = new alter_partitions_result();
    + if (e instanceof InvalidOperationException) {
    + result.o1 = (InvalidOperationException) e;
    + result.setO1IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof MetaException) {
    + result.o2 = (MetaException) e;
    + result.setO2IsSet(true);
    + msg = result;
    + }
    + else
    + {
    + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    + }
    + try {
    + fcall.sendResponse(fb,msg,msgType,seqid);
    + return;
    + } catch (Exception ex) {
    + LOGGER.error("Exception writing to internal frame buffer", ex);
    + }
    + fb.close();
    + }
    + };
    + }
    +
    + protected boolean isOneway() {
    + return false;
    + }
    +
    + public void start(I iface, alter_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    + iface.alter_partitions(args.db_name, args.tbl_name, args.new_parts,resultHandler);
    + }
    + }
    +
    + public static class alter_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partition_with_environment_context_args, Void> {
    + public alter_partition_with_environment_context() {
    + super("alter_partition_with_environment_context");
    + }
    +
    + public alter_partition_with_environment_context_args getEmptyArgsInstance() {
    + return new alter_partition_with_environment_context_args();
    + }
    +
    + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + final org.apache.thrift.AsyncProcessFunction fcall = this;
    + return new AsyncMethodCallback<Void>() {
    + public void onComplete(Void o) {
    + alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result();
    + try {
    + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    + return;
    + } catch (Exception e) {
    + LOGGER.error("Exception writing to internal frame buffer", e);
    + }
    + fb.close();
    + }
    + public void onError(Exception e) {
    + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    + org.apache.thrift.TBase msg;
    + alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result();
    + if (e instanceof InvalidOperationException) {
    + result.o1 = (InvalidOperationException) e;
    + result.setO1IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof MetaException) {
    + result.o2 = (MetaException) e;
    + result.setO2IsSet(true);
    + msg = result;
    + }
    + else
    + {
    + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
    + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
    + }
    + try {
    + fcall.sendResponse(fb,msg,msgType,seqid);
    + return;
    + } catch (Exception ex) {
    + LOGGER.error("Exception writing to internal frame buffer", ex);
    + }
    + fb.close();
    + }
    + };
    + }
    +
    + protected boolean isOneway() {
    + return false;
    + }
    +
    + public void start(I iface, alter_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
    + iface.alter_partition_with_environment_context(args.db_name, args.tbl_name, args.new_part, args.environment_context,resultHandler);
    + }
    + }
    +
    + public static class rename_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, rename_partition_args, Void> {
    + public rename_partition() {
    + super("rename_partition");
    + }
    +
    + public rename_partition_args getEmptyArgsInstance() {
    + return new rename_partition_args();
    + }
    +
    + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
    + final org.apache.thrift.AsyncProcessFunction fcall = this;
    + return new AsyncMethodCallback<Void>() {
    + public void onComplete(Void o) {
    + rename_partition_result result = new rename_partition_result();
    + try {
    + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
    + return;
    + } catch (Exception e) {
    + LOGGER.error("Exception writing to internal frame buffer", e);
    + }
    + fb.close();
    + }
    + public void onError(Exception e) {
    + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
    + org.apache.thrift.TBase msg;
    + rename_partition_result result = new rename_partition_result();
    + if (e instanceof InvalidOperationException) {
    + result.o1 = (InvalidOperationException) e;
    + result.setO1IsSet(true);
    + msg = result;
    + }
    + else if (e instanceof MetaException) {
    + result.o2 = (MetaException) e;
                              result.setO2IsSet(true);
                              msg = result;
                  }
    @@ -67468,7 +67659,1554 @@ public class ThriftHiveMetastore {

          @Override
          public String toString() {
    - StringBuilder sb = new StringBuilder("get_partition_result(");
    + StringBuilder sb = new StringBuilder("get_partition_result(");
    + boolean first = true;
    +
    + sb.append("success:");
    + if (this.success == null) {
    + sb.append("null");
    + } else {
    + sb.append(this.success);
    + }
    + first = false;
    + if (!first) sb.append(", ");
    + sb.append("o1:");
    + if (this.o1 == null) {
    + sb.append("null");
    + } else {
    + sb.append(this.o1);
    + }
    + first = false;
    + if (!first) sb.append(", ");
    + sb.append("o2:");
    + if (this.o2 == null) {
    + sb.append("null");
    + } else {
    + sb.append(this.o2);
    + }
    + first = false;
    + sb.append(")");
    + return sb.toString();
    + }
    +
    + public void validate() throws org.apache.thrift.TException {
    + // check for required fields
    + // check for sub-struct validity
    + if (success != null) {
    + success.validate();
    + }
    + }
    +
    + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    + try {
    + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    + } catch (org.apache.thrift.TException te) {
    + throw new java.io.IOException(te);
    + }
    + }
    +
    + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    + try {
    + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    + } catch (org.apache.thrift.TException te) {
    + throw new java.io.IOException(te);
    + }
    + }
    +
    + private static class get_partition_resultStandardSchemeFactory implements SchemeFactory {
    + public get_partition_resultStandardScheme getScheme() {
    + return new get_partition_resultStandardScheme();
    + }
    + }
    +
    + private static class get_partition_resultStandardScheme extends StandardScheme<get_partition_result> {
    +
    + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_result struct) throws org.apache.thrift.TException {
    + org.apache.thrift.protocol.TField schemeField;
    + iprot.readStructBegin();
    + while (true)
    + {
    + schemeField = iprot.readFieldBegin();
    + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
    + break;
    + }
    + switch (schemeField.id) {
    + case 0: // SUCCESS
    + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
    + struct.success = new Partition();
    + struct.success.read(iprot);
    + struct.setSuccessIsSet(true);
    + } else {
    + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
    + }
    + break;
    + case 1: // O1
    + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
    + struct.o1 = new MetaException();
    + struct.o1.read(iprot);
    + struct.setO1IsSet(true);
    + } else {
    + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
    + }
    + break;
    + case 2: // O2
    + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
    + struct.o2 = new NoSuchObjectException();
    + struct.o2.read(iprot);
    + struct.setO2IsSet(true);
    + } else {
    + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
    + }
    + break;
    + default:
    + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
    + }
    + iprot.readFieldEnd();
    + }
    + iprot.readStructEnd();
    + struct.validate();
    + }
    +
    + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_result struct) throws org.apache.thrift.TException {
    + struct.validate();
    +
    + oprot.writeStructBegin(STRUCT_DESC);
    + if (struct.success != null) {
    + oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
    + struct.success.write(oprot);
    + oprot.writeFieldEnd();
    + }
    + if (struct.o1 != null) {
    + oprot.writeFieldBegin(O1_FIELD_DESC);
    + struct.o1.write(oprot);
    + oprot.writeFieldEnd();
    + }
    + if (struct.o2 != null) {
    + oprot.writeFieldBegin(O2_FIELD_DESC);
    + struct.o2.write(oprot);
    + oprot.writeFieldEnd();
    + }
    + oprot.writeFieldStop();
    + oprot.writeStructEnd();
    + }
    +
    + }
    +
    + private static class get_partition_resultTupleSchemeFactory implements SchemeFactory {
    + public get_partition_resultTupleScheme getScheme() {
    + return new get_partition_resultTupleScheme();
    + }
    + }
    +
    + private static class get_partition_resultTupleScheme extends TupleScheme<get_partition_result> {
    +
    + @Override
    + public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_result struct) throws org.apache.thrift.TException {
    + TTupleProtocol oprot = (TTupleProtocol) prot;
    + BitSet optionals = new BitSet();
    + if (struct.isSetSuccess()) {
    + optionals.set(0);
    + }
    + if (struct.isSetO1()) {
    + optionals.set(1);
    + }
    + if (struct.isSetO2()) {
    + optionals.set(2);
    + }
    + oprot.writeBitSet(optionals, 3);
    + if (struct.isSetSuccess()) {
    + struct.success.write(oprot);
    + }
    + if (struct.isSetO1()) {
    + struct.o1.write(oprot);
    + }
    + if (struct.isSetO2()) {
    + struct.o2.write(oprot);
    + }
    + }
    +
    + @Override
    + public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_result struct) throws org.apache.thrift.TException {
    + TTupleProtocol iprot = (TTupleProtocol) prot;
    + BitSet incoming = iprot.readBitSet(3);
    + if (incoming.get(0)) {
    + struct.success = new Partition();
    + struct.success.read(iprot);
    + struct.setSuccessIsSet(true);
    + }
    + if (incoming.get(1)) {
    + struct.o1 = new MetaException();
    + struct.o1.read(iprot);
    + struct.setO1IsSet(true);
    + }
    + if (incoming.get(2)) {
    + struct.o2 = new NoSuchObjectException();
    + struct.o2.read(iprot);
    + struct.setO2IsSet(true);
    + }
    + }
    + }
    +
    + }
    +
    + public static class exchange_partition_args implements org.apache.thrift.TBase<exchange_partition_args, exchange_partition_args._Fields>, java.io.Serializable, Cloneable, Comparable<exchange_partition_args> {
    + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("exchange_partition_args");
    +
    + private static final org.apache.thrift.protocol.TField PARTITION_SPECS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionSpecs", org.apache.thrift.protocol.TType.MAP, (short)1);
    + private static final org.apache.thrift.protocol.TField SOURCE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("source_db", org.apache.thrift.protocol.TType.STRING, (short)2);
    + private static final org.apache.thrift.protocol.TField SOURCE_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("source_table_name", org.apache.thrift.protocol.TType.STRING, (short)3);
    + private static final org.apache.thrift.protocol.TField DEST_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("dest_db", org.apache.thrift.protocol.TType.STRING, (short)4);
    + private static final org.apache.thrift.protocol.TField DEST_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dest_table_name", org.apache.thrift.protocol.TType.STRING, (short)5);
    +
    + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
    + static {
    + schemes.put(StandardScheme.class, new exchange_partition_argsStandardSchemeFactory());
    + schemes.put(TupleScheme.class, new exchange_partition_argsTupleSchemeFactory());
    + }
    +
    + private Map<String,String> partitionSpecs; // required
    + private String source_db; // required
    + private String source_table_name; // required
    + private String dest_db; // required
    + private String dest_table_name; // required
    +
    + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
    + public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    + PARTITION_SPECS((short)1, "partitionSpecs"),
    + SOURCE_DB((short)2, "source_db"),
    + SOURCE_TABLE_NAME((short)3, "source_table_name"),
    + DEST_DB((short)4, "dest_db"),
    + DEST_TABLE_NAME((short)5, "dest_table_name");
    +
    + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
    +
    + static {
    + for (_Fields field : EnumSet.allOf(_Fields.class)) {
    + byName.put(field.getFieldName(), field);
    + }
    + }
    +
    + /**
    + * Find the _Fields constant that matches fieldId, or null if its not found.
    + */
    + public static _Fields findByThriftId(int fieldId) {
    + switch(fieldId) {
    + case 1: // PARTITION_SPECS
    + return PARTITION_SPECS;
    + case 2: // SOURCE_DB
    + return SOURCE_DB;
    + case 3: // SOURCE_TABLE_NAME
    + return SOURCE_TABLE_NAME;
    + case 4: // DEST_DB
    + return DEST_DB;
    + case 5: // DEST_TABLE_NAME
    + return DEST_TABLE_NAME;
    + default:
    + return null;
    + }
    + }
    +
    + /**
    + * Find the _Fields constant that matches fieldId, throwing an exception
    + * if it is not found.
    + */
    + public static _Fields findByThriftIdOrThrow(int fieldId) {
    + _Fields fields = findByThriftId(fieldId);
    + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
    + return fields;
    + }
    +
    + /**
    + * Find the _Fields constant that matches name, or null if its not found.
    + */
    + public static _Fields findByName(String name) {
    + return byName.get(name);
    + }
    +
    + private final short _thriftId;
    + private final String _fieldName;
    +
    + _Fields(short thriftId, String fieldName) {
    + _thriftId = thriftId;
    + _fieldName = fieldName;
    + }
    +
    + public short getThriftFieldId() {
    + return _thriftId;
    + }
    +
    + public String getFieldName() {
    + return _fieldName;
    + }
    + }
    +
    + // isset id assignments
    + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
    + static {
    + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    + tmpMap.put(_Fields.PARTITION_SPECS, new org.apache.thrift.meta_data.FieldMetaData("partitionSpecs", org.apache.thrift.TFieldRequirementType.DEFAULT,
    + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
    + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING),
    + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
    + tmpMap.put(_Fields.SOURCE_DB, new org.apache.thrift.meta_data.FieldMetaData("source_db", org.apache.thrift.TFieldRequirementType.DEFAULT,
    + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    + tmpMap.put(_Fields.SOURCE_TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("source_table_name", org.apache.thrift.TFieldRequirementType.DEFAULT,
    + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    + tmpMap.put(_Fields.DEST_DB, new org.apache.thrift.meta_data.FieldMetaData("dest_db", org.apache.thrift.TFieldRequirementType.DEFAULT,
    + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    + tmpMap.put(_Fields.DEST_TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("dest_table_name", org.apache.thrift.TFieldRequirementType.DEFAULT,
    + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    + metaDataMap = Collections.unmodifiableMap(tmpMap);
    + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(exchange_partition_args.class, metaDataMap);
    + }
    +
    + public exchange_partition_args() {
    + }
    +
    + public exchange_partition_args(
    + Map<String,String> partitionSpecs,
    + String source_db,
    + String source_table_name,
    + String dest_db,
    + String dest_table_name)
    + {
    + this();
    + this.partitionSpecs = partitionSpecs;
    + this.source_db = source_db;
    + this.source_table_name = source_table_name;
    + this.dest_db = dest_db;
    + this.dest_table_name = dest_table_name;
    + }
    +
    + /**
    + * Performs a deep copy on <i>other</i>.
    + */
    + public exchange_partition_args(exchange_partition_args other) {
    + if (other.isSetPartitionSpecs()) {
    + Map<String,String> __this__partitionSpecs = new HashMap<String,String>(other.partitionSpecs);
    + this.partitionSpecs = __this__partitionSpecs;
    + }
    + if (other.isSetSource_db()) {
    + this.source_db = other.source_db;
    + }
    + if (other.isSetSource_table_name()) {
    + this.source_table_name = other.source_table_name;
    + }
    + if (other.isSetDest_db()) {
    + this.dest_db = other.dest_db;
    + }
    + if (other.isSetDest_table_name()) {
    + this.dest_table_name = other.dest_table_name;
    + }
    + }
    +
    + public exchange_partition_args deepCopy() {
    + return new exchange_partition_args(this);
    + }
    +
    + @Override
    + public void clear() {
    + this.partitionSpecs = null;
    + this.source_db = null;
    + this.source_table_name = null;
    + this.dest_db = null;
    + this.dest_table_name = null;
    + }
    +
    + public int getPartitionSpecsSize() {
    + return (this.partitionSpecs == null) ? 0 : this.partitionSpecs.size();
    + }
    +
    + public void putToPartitionSpecs(String key, String val) {
    + if (this.partitionSpecs == null) {
    + this.partitionSpecs = new HashMap<String,String>();
    + }
    + this.partitionSpecs.put(key, val);
    + }
    +
    + public Map<String,String> getPartitionSpecs() {
    + return this.partitionSpecs;
    + }
    +
    + public void setPartitionSpecs(Map<String,String> partitionSpecs) {
    + this.partitionSpecs = partitionSpecs;
    + }
    +
    + public void unsetPartitionSpecs() {
    + this.partitionSpecs = null;
    + }
    +
    + /** Returns true if field partitionSpecs is set (has been assigned a value) and false otherwise */
    + public boolean isSetPartitionSpecs() {
    + return this.partitionSpecs != null;
    + }
    +
    + public void setPartitionSpecsIsSet(boolean value) {
    + if (!value) {
    + this.partitionSpecs = null;
    + }
    + }
    +
    + public String getSource_db() {
    + return this.source_db;
    + }
    +
    + public void setSource_db(String source_db) {
    + this.source_db = source_db;
    + }
    +
    + public void unsetSource_db() {
    + this.source_db = null;
    + }
    +
    + /** Returns true if field source_db is set (has been assigned a value) and false otherwise */
    + public boolean isSetSource_db() {
    + return this.source_db != null;
    + }
    +
    + public void setSource_dbIsSet(boolean value) {
    + if (!value) {
    + this.source_db = null;
    + }
    + }
    +
    + public String getSource_table_name() {
    + return this.source_table_name;
    + }
    +
    + public void setSource_table_name(String source_table_name) {
    + this.source_table_name = source_table_name;
    + }
    +
    + public void unsetSource_table_name() {
    + this.source_table_name = null;
    + }
    +
    + /** Returns true if field source_table_name is set (has been assigned a value) and false otherwise */
    + public boolean isSetSource_table_name() {
    + return this.source_table_name != null;
    + }
    +
    + public void setSource_table_nameIsSet(boolean value) {
    + if (!value) {
    + this.source_table_name = null;
    + }
    + }
    +
    + public String getDest_db

    <TRUNCATED>
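
For readers following this change: below is a minimal, hedged sketch of how the new exchange_partitions call might be invoked from the generated Java client. It assumes the Java signature mirrors the C++ declarations in the header diff of the next message (a Map<String,String> of partition specs plus source/destination database and table names, returning a List<Partition>); the host, port, database, table, and partition-column names are placeholders, not values taken from this commit.

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class ExchangePartitionsExample {
      public static void main(String[] args) throws Exception {
        // Assumed metastore endpoint; 9083 is the usual Thrift metastore port.
        TTransport transport = new TSocket("localhost", 9083);
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

        // Partition spec keyed by partition column name -> value (placeholder values).
        Map<String, String> partitionSpecs = new HashMap<String, String>();
        partitionSpecs.put("ds", "2015-11-06");

        // Exchange the matching partitions from source_db.source_table into
        // dest_db.dest_table; the plural call is assumed to return the moved partitions.
        List<Partition> exchanged = client.exchange_partitions(
            partitionSpecs, "source_db", "source_table", "dest_db", "dest_table");
        System.out.println("Exchanged " + exchanged.size() + " partition(s)");

        transport.close();
      }
    }

On failure the call surfaces the same exception slots declared for the result struct in this change (MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException), so callers can catch those individually instead of the generic TException.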
  • Jxiang at Nov 6, 2015 at 5:32 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
    index c8f16a7..3d7cb18 100644
    --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
    +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
    @@ -66,6 +66,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService
        virtual void drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req) = 0;
        virtual void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) = 0;
        virtual void exchange_partition(Partition& _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) = 0;
    + virtual void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) = 0;
        virtual void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names) = 0;
        virtual void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) = 0;
        virtual void get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) = 0;
    @@ -320,6 +321,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
        void exchange_partition(Partition& /* _return */, const std::map<std::string, std::string> & /* partitionSpecs */, const std::string& /* source_db */, const std::string& /* source_table_name */, const std::string& /* dest_db */, const std::string& /* dest_table_name */) {
          return;
        }
    + void exchange_partitions(std::vector<Partition> & /* _return */, const std::map<std::string, std::string> & /* partitionSpecs */, const std::string& /* source_db */, const std::string& /* source_table_name */, const std::string& /* dest_db */, const std::string& /* dest_table_name */) {
    + return;
    + }
        void get_partition_with_auth(Partition& /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */, const std::string& /* user_name */, const std::vector<std::string> & /* group_names */) {
          return;
        }
    @@ -6328,6 +6332,170 @@ class ThriftHiveMetastore_exchange_partition_presult {

      };

    +typedef struct _ThriftHiveMetastore_exchange_partitions_args__isset {
    + _ThriftHiveMetastore_exchange_partitions_args__isset() : partitionSpecs(false), source_db(false), source_table_name(false), dest_db(false), dest_table_name(false) {}
    + bool partitionSpecs :1;
    + bool source_db :1;
    + bool source_table_name :1;
    + bool dest_db :1;
    + bool dest_table_name :1;
    +} _ThriftHiveMetastore_exchange_partitions_args__isset;
    +
    +class ThriftHiveMetastore_exchange_partitions_args {
    + public:
    +
    + ThriftHiveMetastore_exchange_partitions_args(const ThriftHiveMetastore_exchange_partitions_args&);
    + ThriftHiveMetastore_exchange_partitions_args& operator=(const ThriftHiveMetastore_exchange_partitions_args&);
    + ThriftHiveMetastore_exchange_partitions_args() : source_db(), source_table_name(), dest_db(), dest_table_name() {
    + }
    +
    + virtual ~ThriftHiveMetastore_exchange_partitions_args() throw();
    + std::map<std::string, std::string> partitionSpecs;
    + std::string source_db;
    + std::string source_table_name;
    + std::string dest_db;
    + std::string dest_table_name;
    +
    + _ThriftHiveMetastore_exchange_partitions_args__isset __isset;
    +
    + void __set_partitionSpecs(const std::map<std::string, std::string> & val);
    +
    + void __set_source_db(const std::string& val);
    +
    + void __set_source_table_name(const std::string& val);
    +
    + void __set_dest_db(const std::string& val);
    +
    + void __set_dest_table_name(const std::string& val);
    +
    + bool operator == (const ThriftHiveMetastore_exchange_partitions_args & rhs) const
    + {
    + if (!(partitionSpecs == rhs.partitionSpecs))
    + return false;
    + if (!(source_db == rhs.source_db))
    + return false;
    + if (!(source_table_name == rhs.source_table_name))
    + return false;
    + if (!(dest_db == rhs.dest_db))
    + return false;
    + if (!(dest_table_name == rhs.dest_table_name))
    + return false;
    + return true;
    + }
    + bool operator != (const ThriftHiveMetastore_exchange_partitions_args &rhs) const {
    + return !(*this == rhs);
    + }
    +
    + bool operator < (const ThriftHiveMetastore_exchange_partitions_args & ) const;
    +
    + uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
    + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
    +
    +};
    +
    +
    +class ThriftHiveMetastore_exchange_partitions_pargs {
    + public:
    +
    +
    + virtual ~ThriftHiveMetastore_exchange_partitions_pargs() throw();
    + const std::map<std::string, std::string> * partitionSpecs;
    + const std::string* source_db;
    + const std::string* source_table_name;
    + const std::string* dest_db;
    + const std::string* dest_table_name;
    +
    + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
    +
    +};
    +
    +typedef struct _ThriftHiveMetastore_exchange_partitions_result__isset {
    + _ThriftHiveMetastore_exchange_partitions_result__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {}
    + bool success :1;
    + bool o1 :1;
    + bool o2 :1;
    + bool o3 :1;
    + bool o4 :1;
    +} _ThriftHiveMetastore_exchange_partitions_result__isset;
    +
    +class ThriftHiveMetastore_exchange_partitions_result {
    + public:
    +
    + ThriftHiveMetastore_exchange_partitions_result(const ThriftHiveMetastore_exchange_partitions_result&);
    + ThriftHiveMetastore_exchange_partitions_result& operator=(const ThriftHiveMetastore_exchange_partitions_result&);
    + ThriftHiveMetastore_exchange_partitions_result() {
    + }
    +
    + virtual ~ThriftHiveMetastore_exchange_partitions_result() throw();
    + std::vector<Partition> success;
    + MetaException o1;
    + NoSuchObjectException o2;
    + InvalidObjectException o3;
    + InvalidInputException o4;
    +
    + _ThriftHiveMetastore_exchange_partitions_result__isset __isset;
    +
    + void __set_success(const std::vector<Partition> & val);
    +
    + void __set_o1(const MetaException& val);
    +
    + void __set_o2(const NoSuchObjectException& val);
    +
    + void __set_o3(const InvalidObjectException& val);
    +
    + void __set_o4(const InvalidInputException& val);
    +
    + bool operator == (const ThriftHiveMetastore_exchange_partitions_result & rhs) const
    + {
    + if (!(success == rhs.success))
    + return false;
    + if (!(o1 == rhs.o1))
    + return false;
    + if (!(o2 == rhs.o2))
    + return false;
    + if (!(o3 == rhs.o3))
    + return false;
    + if (!(o4 == rhs.o4))
    + return false;
    + return true;
    + }
    + bool operator != (const ThriftHiveMetastore_exchange_partitions_result &rhs) const {
    + return !(*this == rhs);
    + }
    +
    + bool operator < (const ThriftHiveMetastore_exchange_partitions_result & ) const;
    +
    + uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
    + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
    +
    +};
    +
    +typedef struct _ThriftHiveMetastore_exchange_partitions_presult__isset {
    + _ThriftHiveMetastore_exchange_partitions_presult__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {}
    + bool success :1;
    + bool o1 :1;
    + bool o2 :1;
    + bool o3 :1;
    + bool o4 :1;
    +} _ThriftHiveMetastore_exchange_partitions_presult__isset;
    +
    +class ThriftHiveMetastore_exchange_partitions_presult {
    + public:
    +
    +
    + virtual ~ThriftHiveMetastore_exchange_partitions_presult() throw();
    + std::vector<Partition> * success;
    + MetaException o1;
    + NoSuchObjectException o2;
    + InvalidObjectException o3;
    + InvalidInputException o4;
    +
    + _ThriftHiveMetastore_exchange_partitions_presult__isset __isset;
    +
    + uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
    +
    +};
    +
      typedef struct _ThriftHiveMetastore_get_partition_with_auth_args__isset {
        _ThriftHiveMetastore_get_partition_with_auth_args__isset() : db_name(false), tbl_name(false), part_vals(false), user_name(false), group_names(false) {}
        bool db_name :1;
    @@ -16868,6 +17036,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
        void exchange_partition(Partition& _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
        void send_exchange_partition(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
        void recv_exchange_partition(Partition& _return);
    + void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
    + void send_exchange_partitions(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
    + void recv_exchange_partitions(std::vector<Partition> & _return);
        void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
        void send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
        void recv_get_partition_with_auth(Partition& _return);
    @@ -17177,6 +17348,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
        void process_drop_partitions_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
        void process_get_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
        void process_exchange_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
    + void process_exchange_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
        void process_get_partition_with_auth(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
        void process_get_partition_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
        void process_get_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
    @@ -17310,6 +17482,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
          processMap_["drop_partitions_req"] = &ThriftHiveMetastoreProcessor::process_drop_partitions_req;
          processMap_["get_partition"] = &ThriftHiveMetastoreProcessor::process_get_partition;
          processMap_["exchange_partition"] = &ThriftHiveMetastoreProcessor::process_exchange_partition;
    + processMap_["exchange_partitions"] = &ThriftHiveMetastoreProcessor::process_exchange_partitions;
          processMap_["get_partition_with_auth"] = &ThriftHiveMetastoreProcessor::process_get_partition_with_auth;
          processMap_["get_partition_by_name"] = &ThriftHiveMetastoreProcessor::process_get_partition_by_name;
          processMap_["get_partitions"] = &ThriftHiveMetastoreProcessor::process_get_partitions;
    @@ -17849,6 +18022,16 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
          return;
        }

    + void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) {
    + size_t sz = ifaces_.size();
    + size_t i = 0;
    + for (; i < (sz - 1); ++i) {
    + ifaces_[i]->exchange_partitions(_return, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name);
    + }
    + ifaces_[i]->exchange_partitions(_return, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name);
    + return;
    + }
    +
        void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names) {
          size_t sz = ifaces_.size();
          size_t i = 0;
    @@ -18815,6 +18998,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
        void exchange_partition(Partition& _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
        int32_t send_exchange_partition(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
        void recv_exchange_partition(Partition& _return, const int32_t seqid);
    + void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
    + int32_t send_exchange_partitions(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
    + void recv_exchange_partitions(std::vector<Partition> & _return, const int32_t seqid);
        void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
        int32_t send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
        void recv_get_partition_with_auth(Partition& _return, const int32_t seqid);

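    For reference, here is a minimal sketch (not part of this patch) of calling the new exchange_partitions RPC through the generated C++ client declared above. The metastore host/port and the database/table/partition names are hypothetical, and the code assumes the Apache::Hadoop::Hive namespace and the Thrift 0.9-style boost::shared_ptr transport stack used by this generation of the code:

        #include <cstdio>
        #include <map>
        #include <string>
        #include <vector>
        #include <boost/shared_ptr.hpp>
        #include <thrift/protocol/TBinaryProtocol.h>
        #include <thrift/transport/TSocket.h>
        #include <thrift/transport/TBufferTransports.h>
        #include "ThriftHiveMetastore.h"

        using namespace apache::thrift;
        using namespace apache::thrift::protocol;
        using namespace apache::thrift::transport;
        using namespace Apache::Hadoop::Hive;

        int main() {
          // Assumed metastore endpoint; 9083 is the conventional metastore port.
          boost::shared_ptr<TSocket> socket(new TSocket("localhost", 9083));
          boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
          boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
          ThriftHiveMetastoreClient client(protocol);

          transport->open();

          // One entry per partition column; the values select the partitions to move.
          std::map<std::string, std::string> partitionSpecs;
          partitionSpecs["ds"] = "2015-11-06";  // hypothetical partition column/value

          std::vector<Partition> moved;
          try {
            client.exchange_partitions(moved, partitionSpecs,
                                       "staging_db", "staging_table",   // hypothetical source
                                       "prod_db", "prod_table");        // hypothetical destination
            printf("moved %zu partition(s)\n", moved.size());
          } catch (const MetaException& e) {
            // o1..o4 in the generated *_result struct surface as these typed exceptions.
            fprintf(stderr, "exchange_partitions failed: %s\n", e.message.c_str());
          }

          transport->close();
          return 0;
        }

    Unlike exchange_partition, which returns a single Partition via _return, the plural form fills a std::vector<Partition> with every partition that was moved.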
    http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
    index 9eca65c..a395729 100644
    --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
    +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
    @@ -242,6 +242,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
          printf("exchange_partition\n");
        }

    + void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) {
    + // Your implementation goes here
    + printf("exchange_partitions\n");
    + }
    +
        void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names) {
          // Your implementation goes here
          printf("get_partition_with_auth\n");
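    The generated stub above only prints the call name. As an illustration only (this is not how the metastore implements the RPC; the real logic lives in the Java HiveMetaStore handler), a filled-in C++ stub for a test server might look like the following, assuming the message field declared on the exception types in hive_metastore.thrift:

        void exchange_partitions(std::vector<Partition> & _return,
                                 const std::map<std::string, std::string> & partitionSpecs,
                                 const std::string& source_db,
                                 const std::string& source_table_name,
                                 const std::string& dest_db,
                                 const std::string& dest_table_name) {
          if (partitionSpecs.empty()) {
            InvalidInputException ex;
            ex.message = "partitionSpecs must not be empty";  // assumed 'message' field
            throw ex;  // delivered to the client as result field o4
          }
          if (source_db == dest_db && source_table_name == dest_table_name) {
            MetaException ex;
            ex.message = "source and destination tables must differ";
            throw ex;  // delivered to the client as result field o1
          }
          // A real handler would move every partition matching partitionSpecs from
          // the source table to the destination table and return them; this test
          // stub simply reports success with an empty list.
          _return.clear();
        }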
    http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    index e922d7d..a6862be 100644
    --- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    +++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    @@ -394,6 +394,19 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
         */
        public function exchange_partition(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name);
        /**
    + * @param array $partitionSpecs
    + * @param string $source_db
    + * @param string $source_table_name
    + * @param string $dest_db
    + * @param string $dest_table_name
    + * @return \metastore\Partition[]
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\InvalidInputException
    + */
    + public function exchange_partitions(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name);
    + /**
         * @param string $db_name
         * @param string $tbl_name
         * @param string[] $part_vals
    @@ -3622,6 +3635,73 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
          throw new \Exception("exchange_partition failed: unknown result");
        }

    + public function exchange_partitions(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name)
    + {
    + $this->send_exchange_partitions($partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name);
    + return $this->recv_exchange_partitions();
    + }
    +
    + public function send_exchange_partitions(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name)
    + {
    + $args = new \metastore\ThriftHiveMetastore_exchange_partitions_args();
    + $args->partitionSpecs = $partitionSpecs;
    + $args->source_db = $source_db;
    + $args->source_table_name = $source_table_name;
    + $args->dest_db = $dest_db;
    + $args->dest_table_name = $dest_table_name;
    + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
    + if ($bin_accel)
    + {
    + thrift_protocol_write_binary($this->output_, 'exchange_partitions', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
    + }
    + else
    + {
    + $this->output_->writeMessageBegin('exchange_partitions', TMessageType::CALL, $this->seqid_);
    + $args->write($this->output_);
    + $this->output_->writeMessageEnd();
    + $this->output_->getTransport()->flush();
    + }
    + }
    +
    + public function recv_exchange_partitions()
    + {
    + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
    + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_exchange_partitions_result', $this->input_->isStrictRead());
    + else
    + {
    + $rseqid = 0;
    + $fname = null;
    + $mtype = 0;
    +
    + $this->input_->readMessageBegin($fname, $mtype, $rseqid);
    + if ($mtype == TMessageType::EXCEPTION) {
    + $x = new TApplicationException();
    + $x->read($this->input_);
    + $this->input_->readMessageEnd();
    + throw $x;
    + }
    + $result = new \metastore\ThriftHiveMetastore_exchange_partitions_result();
    + $result->read($this->input_);
    + $this->input_->readMessageEnd();
    + }
    + if ($result->success !== null) {
    + return $result->success;
    + }
    + if ($result->o1 !== null) {
    + throw $result->o1;
    + }
    + if ($result->o2 !== null) {
    + throw $result->o2;
    + }
    + if ($result->o3 !== null) {
    + throw $result->o3;
    + }
    + if ($result->o4 !== null) {
    + throw $result->o4;
    + }
    + throw new \Exception("exchange_partitions failed: unknown result");
    + }
    +
        public function get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names)
        {
          $this->send_get_partition_with_auth($db_name, $tbl_name, $part_vals, $user_name, $group_names);
    @@ -19473,6 +19553,410 @@ class ThriftHiveMetastore_exchange_partition_result {

      }

    +class ThriftHiveMetastore_exchange_partitions_args {
    + static $_TSPEC;
    +
    + /**
    + * @var array
    + */
    + public $partitionSpecs = null;
    + /**
    + * @var string
    + */
    + public $source_db = null;
    + /**
    + * @var string
    + */
    + public $source_table_name = null;
    + /**
    + * @var string
    + */
    + public $dest_db = null;
    + /**
    + * @var string
    + */
    + public $dest_table_name = null;
    +
    + public function __construct($vals=null) {
    + if (!isset(self::$_TSPEC)) {
    + self::$_TSPEC = array(
    + 1 => array(
    + 'var' => 'partitionSpecs',
    + 'type' => TType::MAP,
    + 'ktype' => TType::STRING,
    + 'vtype' => TType::STRING,
    + 'key' => array(
    + 'type' => TType::STRING,
    + ),
    + 'val' => array(
    + 'type' => TType::STRING,
    + ),
    + ),
    + 2 => array(
    + 'var' => 'source_db',
    + 'type' => TType::STRING,
    + ),
    + 3 => array(
    + 'var' => 'source_table_name',
    + 'type' => TType::STRING,
    + ),
    + 4 => array(
    + 'var' => 'dest_db',
    + 'type' => TType::STRING,
    + ),
    + 5 => array(
    + 'var' => 'dest_table_name',
    + 'type' => TType::STRING,
    + ),
    + );
    + }
    + if (is_array($vals)) {
    + if (isset($vals['partitionSpecs'])) {
    + $this->partitionSpecs = $vals['partitionSpecs'];
    + }
    + if (isset($vals['source_db'])) {
    + $this->source_db = $vals['source_db'];
    + }
    + if (isset($vals['source_table_name'])) {
    + $this->source_table_name = $vals['source_table_name'];
    + }
    + if (isset($vals['dest_db'])) {
    + $this->dest_db = $vals['dest_db'];
    + }
    + if (isset($vals['dest_table_name'])) {
    + $this->dest_table_name = $vals['dest_table_name'];
    + }
    + }
    + }
    +
    + public function getName() {
    + return 'ThriftHiveMetastore_exchange_partitions_args';
    + }
    +
    + public function read($input)
    + {
    + $xfer = 0;
    + $fname = null;
    + $ftype = 0;
    + $fid = 0;
    + $xfer += $input->readStructBegin($fname);
    + while (true)
    + {
    + $xfer += $input->readFieldBegin($fname, $ftype, $fid);
    + if ($ftype == TType::STOP) {
    + break;
    + }
    + switch ($fid)
    + {
    + case 1:
    + if ($ftype == TType::MAP) {
    + $this->partitionSpecs = array();
    + $_size669 = 0;
    + $_ktype670 = 0;
    + $_vtype671 = 0;
    + $xfer += $input->readMapBegin($_ktype670, $_vtype671, $_size669);
    + for ($_i673 = 0; $_i673 < $_size669; ++$_i673)
    + {
    + $key674 = '';
    + $val675 = '';
    + $xfer += $input->readString($key674);
    + $xfer += $input->readString($val675);
    + $this->partitionSpecs[$key674] = $val675;
    + }
    + $xfer += $input->readMapEnd();
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + case 2:
    + if ($ftype == TType::STRING) {
    + $xfer += $input->readString($this->source_db);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + case 3:
    + if ($ftype == TType::STRING) {
    + $xfer += $input->readString($this->source_table_name);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + case 4:
    + if ($ftype == TType::STRING) {
    + $xfer += $input->readString($this->dest_db);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + case 5:
    + if ($ftype == TType::STRING) {
    + $xfer += $input->readString($this->dest_table_name);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + default:
    + $xfer += $input->skip($ftype);
    + break;
    + }
    + $xfer += $input->readFieldEnd();
    + }
    + $xfer += $input->readStructEnd();
    + return $xfer;
    + }
    +
    + public function write($output) {
    + $xfer = 0;
    + $xfer += $output->writeStructBegin('ThriftHiveMetastore_exchange_partitions_args');
    + if ($this->partitionSpecs !== null) {
    + if (!is_array($this->partitionSpecs)) {
    + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
    + }
    + $xfer += $output->writeFieldBegin('partitionSpecs', TType::MAP, 1);
    + {
    + $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
    + {
    + foreach ($this->partitionSpecs as $kiter676 => $viter677)
    + {
    + $xfer += $output->writeString($kiter676);
    + $xfer += $output->writeString($viter677);
    + }
    + }
    + $output->writeMapEnd();
    + }
    + $xfer += $output->writeFieldEnd();
    + }
    + if ($this->source_db !== null) {
    + $xfer += $output->writeFieldBegin('source_db', TType::STRING, 2);
    + $xfer += $output->writeString($this->source_db);
    + $xfer += $output->writeFieldEnd();
    + }
    + if ($this->source_table_name !== null) {
    + $xfer += $output->writeFieldBegin('source_table_name', TType::STRING, 3);
    + $xfer += $output->writeString($this->source_table_name);
    + $xfer += $output->writeFieldEnd();
    + }
    + if ($this->dest_db !== null) {
    + $xfer += $output->writeFieldBegin('dest_db', TType::STRING, 4);
    + $xfer += $output->writeString($this->dest_db);
    + $xfer += $output->writeFieldEnd();
    + }
    + if ($this->dest_table_name !== null) {
    + $xfer += $output->writeFieldBegin('dest_table_name', TType::STRING, 5);
    + $xfer += $output->writeString($this->dest_table_name);
    + $xfer += $output->writeFieldEnd();
    + }
    + $xfer += $output->writeFieldStop();
    + $xfer += $output->writeStructEnd();
    + return $xfer;
    + }
    +
    +}
    +
    +class ThriftHiveMetastore_exchange_partitions_result {
    + static $_TSPEC;
    +
    + /**
    + * @var \metastore\Partition[]
    + */
    + public $success = null;
    + /**
    + * @var \metastore\MetaException
    + */
    + public $o1 = null;
    + /**
    + * @var \metastore\NoSuchObjectException
    + */
    + public $o2 = null;
    + /**
    + * @var \metastore\InvalidObjectException
    + */
    + public $o3 = null;
    + /**
    + * @var \metastore\InvalidInputException
    + */
    + public $o4 = null;
    +
    + public function __construct($vals=null) {
    + if (!isset(self::$_TSPEC)) {
    + self::$_TSPEC = array(
    + 0 => array(
    + 'var' => 'success',
    + 'type' => TType::LST,
    + 'etype' => TType::STRUCT,
    + 'elem' => array(
    + 'type' => TType::STRUCT,
    + 'class' => '\metastore\Partition',
    + ),
    + ),
    + 1 => array(
    + 'var' => 'o1',
    + 'type' => TType::STRUCT,
    + 'class' => '\metastore\MetaException',
    + ),
    + 2 => array(
    + 'var' => 'o2',
    + 'type' => TType::STRUCT,
    + 'class' => '\metastore\NoSuchObjectException',
    + ),
    + 3 => array(
    + 'var' => 'o3',
    + 'type' => TType::STRUCT,
    + 'class' => '\metastore\InvalidObjectException',
    + ),
    + 4 => array(
    + 'var' => 'o4',
    + 'type' => TType::STRUCT,
    + 'class' => '\metastore\InvalidInputException',
    + ),
    + );
    + }
    + if (is_array($vals)) {
    + if (isset($vals['success'])) {
    + $this->success = $vals['success'];
    + }
    + if (isset($vals['o1'])) {
    + $this->o1 = $vals['o1'];
    + }
    + if (isset($vals['o2'])) {
    + $this->o2 = $vals['o2'];
    + }
    + if (isset($vals['o3'])) {
    + $this->o3 = $vals['o3'];
    + }
    + if (isset($vals['o4'])) {
    + $this->o4 = $vals['o4'];
    + }
    + }
    + }
    +
    + public function getName() {
    + return 'ThriftHiveMetastore_exchange_partitions_result';
    + }
    +
    + public function read($input)
    + {
    + $xfer = 0;
    + $fname = null;
    + $ftype = 0;
    + $fid = 0;
    + $xfer += $input->readStructBegin($fname);
    + while (true)
    + {
    + $xfer += $input->readFieldBegin($fname, $ftype, $fid);
    + if ($ftype == TType::STOP) {
    + break;
    + }
    + switch ($fid)
    + {
    + case 0:
    + if ($ftype == TType::LST) {
    + $this->success = array();
    + $_size678 = 0;
    + $_etype681 = 0;
    + $xfer += $input->readListBegin($_etype681, $_size678);
    + for ($_i682 = 0; $_i682 < $_size678; ++$_i682)
    + {
    + $elem683 = null;
    + $elem683 = new \metastore\Partition();
    + $xfer += $elem683->read($input);
    + $this->success []= $elem683;
    + }
    + $xfer += $input->readListEnd();
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + case 1:
    + if ($ftype == TType::STRUCT) {
    + $this->o1 = new \metastore\MetaException();
    + $xfer += $this->o1->read($input);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + case 2:
    + if ($ftype == TType::STRUCT) {
    + $this->o2 = new \metastore\NoSuchObjectException();
    + $xfer += $this->o2->read($input);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + case 3:
    + if ($ftype == TType::STRUCT) {
    + $this->o3 = new \metastore\InvalidObjectException();
    + $xfer += $this->o3->read($input);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + case 4:
    + if ($ftype == TType::STRUCT) {
    + $this->o4 = new \metastore\InvalidInputException();
    + $xfer += $this->o4->read($input);
    + } else {
    + $xfer += $input->skip($ftype);
    + }
    + break;
    + default:
    + $xfer += $input->skip($ftype);
    + break;
    + }
    + $xfer += $input->readFieldEnd();
    + }
    + $xfer += $input->readStructEnd();
    + return $xfer;
    + }
    +
    + public function write($output) {
    + $xfer = 0;
    + $xfer += $output->writeStructBegin('ThriftHiveMetastore_exchange_partitions_result');
    + if ($this->success !== null) {
    + if (!is_array($this->success)) {
    + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
    + }
    + $xfer += $output->writeFieldBegin('success', TType::LST, 0);
    + {
    + $output->writeListBegin(TType::STRUCT, count($this->success));
    + {
    + foreach ($this->success as $iter684)
    + {
    + $xfer += $iter684->write($output);
    + }
    + }
    + $output->writeListEnd();
    + }
    + $xfer += $output->writeFieldEnd();
    + }
    + if ($this->o1 !== null) {
    + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
    + $xfer += $this->o1->write($output);
    + $xfer += $output->writeFieldEnd();
    + }
    + if ($this->o2 !== null) {
    + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
    + $xfer += $this->o2->write($output);
    + $xfer += $output->writeFieldEnd();
    + }
    + if ($this->o3 !== null) {
    + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3);
    + $xfer += $this->o3->write($output);
    + $xfer += $output->writeFieldEnd();
    + }
    + if ($this->o4 !== null) {
    + $xfer += $output->writeFieldBegin('o4', TType::STRUCT, 4);
    + $xfer += $this->o4->write($output);
    + $xfer += $output->writeFieldEnd();
    + }
    + $xfer += $output->writeFieldStop();
    + $xfer += $output->writeStructEnd();
    + return $xfer;
    + }
    +
    +}
    +
      class ThriftHiveMetastore_get_partition_with_auth_args {
        static $_TSPEC;

    @@ -19585,14 +20069,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
              case 3:
                if ($ftype == TType::LST) {
                  $this->part_vals = array();
    - $_size669 = 0;
    - $_etype672 = 0;
    - $xfer += $input->readListBegin($_etype672, $_size669);
    - for ($_i673 = 0; $_i673 < $_size669; ++$_i673)
    + $_size685 = 0;
    + $_etype688 = 0;
    + $xfer += $input->readListBegin($_etype688, $_size685);
    + for ($_i689 = 0; $_i689 < $_size685; ++$_i689)
                  {
    - $elem674 = null;
    - $xfer += $input->readString($elem674);
    - $this->part_vals []= $elem674;
    + $elem690 = null;
    + $xfer += $input->readString($elem690);
    + $this->part_vals []= $elem690;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -19609,14 +20093,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
              case 5:
                if ($ftype == TType::LST) {
                  $this->group_names = array();
    - $_size675 = 0;
    - $_etype678 = 0;
    - $xfer += $input->readListBegin($_etype678, $_size675);
    - for ($_i679 = 0; $_i679 < $_size675; ++$_i679)
    + $_size691 = 0;
    + $_etype694 = 0;
    + $xfer += $input->readListBegin($_etype694, $_size691);
    + for ($_i695 = 0; $_i695 < $_size691; ++$_i695)
                  {
    - $elem680 = null;
    - $xfer += $input->readString($elem680);
    - $this->group_names []= $elem680;
    + $elem696 = null;
    + $xfer += $input->readString($elem696);
    + $this->group_names []= $elem696;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -19654,9 +20138,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
            {
              $output->writeListBegin(TType::STRING, count($this->part_vals));
              {
    - foreach ($this->part_vals as $iter681)
    + foreach ($this->part_vals as $iter697)
                {
    - $xfer += $output->writeString($iter681);
    + $xfer += $output->writeString($iter697);
                }
              }
              $output->writeListEnd();
    @@ -19676,9 +20160,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
            {
              $output->writeListBegin(TType::STRING, count($this->group_names));
              {
    - foreach ($this->group_names as $iter682)
    + foreach ($this->group_names as $iter698)
                {
    - $xfer += $output->writeString($iter682);
    + $xfer += $output->writeString($iter698);
                }
              }
              $output->writeListEnd();
    @@ -20269,15 +20753,15 @@ class ThriftHiveMetastore_get_partitions_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size683 = 0;
    - $_etype686 = 0;
    - $xfer += $input->readListBegin($_etype686, $_size683);
    - for ($_i687 = 0; $_i687 < $_size683; ++$_i687)
    + $_size699 = 0;
    + $_etype702 = 0;
    + $xfer += $input->readListBegin($_etype702, $_size699);
    + for ($_i703 = 0; $_i703 < $_size699; ++$_i703)
                  {
    - $elem688 = null;
    - $elem688 = new \metastore\Partition();
    - $xfer += $elem688->read($input);
    - $this->success []= $elem688;
    + $elem704 = null;
    + $elem704 = new \metastore\Partition();
    + $xfer += $elem704->read($input);
    + $this->success []= $elem704;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -20321,9 +20805,9 @@ class ThriftHiveMetastore_get_partitions_result {
            {
              $output->writeListBegin(TType::STRUCT, count($this->success));
              {
    - foreach ($this->success as $iter689)
    + foreach ($this->success as $iter705)
                {
    - $xfer += $iter689->write($output);
    + $xfer += $iter705->write($output);
                }
              }
              $output->writeListEnd();
    @@ -20469,14 +20953,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
              case 5:
                if ($ftype == TType::LST) {
                  $this->group_names = array();
    - $_size690 = 0;
    - $_etype693 = 0;
    - $xfer += $input->readListBegin($_etype693, $_size690);
    - for ($_i694 = 0; $_i694 < $_size690; ++$_i694)
    + $_size706 = 0;
    + $_etype709 = 0;
    + $xfer += $input->readListBegin($_etype709, $_size706);
    + for ($_i710 = 0; $_i710 < $_size706; ++$_i710)
                  {
    - $elem695 = null;
    - $xfer += $input->readString($elem695);
    - $this->group_names []= $elem695;
    + $elem711 = null;
    + $xfer += $input->readString($elem711);
    + $this->group_names []= $elem711;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -20524,9 +21008,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
            {
              $output->writeListBegin(TType::STRING, count($this->group_names));
              {
    - foreach ($this->group_names as $iter696)
    + foreach ($this->group_names as $iter712)
                {
    - $xfer += $output->writeString($iter696);
    + $xfer += $output->writeString($iter712);
                }
              }
              $output->writeListEnd();
    @@ -20615,15 +21099,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size697 = 0;
    - $_etype700 = 0;
    - $xfer += $input->readListBegin($_etype700, $_size697);
    - for ($_i701 = 0; $_i701 < $_size697; ++$_i701)
    + $_size713 = 0;
    + $_etype716 = 0;
    + $xfer += $input->readListBegin($_etype716, $_size713);
    + for ($_i717 = 0; $_i717 < $_size713; ++$_i717)
                  {
    - $elem702 = null;
    - $elem702 = new \metastore\Partition();
    - $xfer += $elem702->read($input);
    - $this->success []= $elem702;
    + $elem718 = null;
    + $elem718 = new \metastore\Partition();
    + $xfer += $elem718->read($input);
    + $this->success []= $elem718;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -20667,9 +21151,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
            {
              $output->writeListBegin(TType::STRUCT, count($this->success));
              {
    - foreach ($this->success as $iter703)
    + foreach ($this->success as $iter719)
                {
    - $xfer += $iter703->write($output);
    + $xfer += $iter719->write($output);
                }
              }
              $output->writeListEnd();
    @@ -20889,15 +21373,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size704 = 0;
    - $_etype707 = 0;
    - $xfer += $input->readListBegin($_etype707, $_size704);
    - for ($_i708 = 0; $_i708 < $_size704; ++$_i708)
    + $_size720 = 0;
    + $_etype723 = 0;
    + $xfer += $input->readListBegin($_etype723, $_size720);
    + for ($_i724 = 0; $_i724 < $_size720; ++$_i724)
                  {
    - $elem709 = null;
    - $elem709 = new \metastore\PartitionSpec();
    - $xfer += $elem709->read($input);
    - $this->success []= $elem709;
    + $elem725 = null;
    + $elem725 = new \metastore\PartitionSpec();
    + $xfer += $elem725->read($input);
    + $this->success []= $elem725;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -20941,9 +21425,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
            {
              $output->writeListBegin(TType::STRUCT, count($this->success));
              {
    - foreach ($this->success as $iter710)
    + foreach ($this->success as $iter726)
                {
    - $xfer += $iter710->write($output);
    + $xfer += $iter726->write($output);
                }
              }
              $output->writeListEnd();
    @@ -21150,14 +21634,14 @@ class ThriftHiveMetastore_get_partition_names_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size711 = 0;
    - $_etype714 = 0;
    - $xfer += $input->readListBegin($_etype714, $_size711);
    - for ($_i715 = 0; $_i715 < $_size711; ++$_i715)
    + $_size727 = 0;
    + $_etype730 = 0;
    + $xfer += $input->readListBegin($_etype730, $_size727);
    + for ($_i731 = 0; $_i731 < $_size727; ++$_i731)
                  {
    - $elem716 = null;
    - $xfer += $input->readString($elem716);
    - $this->success []= $elem716;
    + $elem732 = null;
    + $xfer += $input->readString($elem732);
    + $this->success []= $elem732;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -21193,9 +21677,9 @@ class ThriftHiveMetastore_get_partition_names_result {
            {
              $output->writeListBegin(TType::STRING, count($this->success));
              {
    - foreach ($this->success as $iter717)
    + foreach ($this->success as $iter733)
                {
    - $xfer += $output->writeString($iter717);
    + $xfer += $output->writeString($iter733);
                }
              }
              $output->writeListEnd();
    @@ -21311,14 +21795,14 @@ class ThriftHiveMetastore_get_partitions_ps_args {
              case 3:
                if ($ftype == TType::LST) {
                  $this->part_vals = array();
    - $_size718 = 0;
    - $_etype721 = 0;
    - $xfer += $input->readListBegin($_etype721, $_size718);
    - for ($_i722 = 0; $_i722 < $_size718; ++$_i722)
    + $_size734 = 0;
    + $_etype737 = 0;
    + $xfer += $input->readListBegin($_etype737, $_size734);
    + for ($_i738 = 0; $_i738 < $_size734; ++$_i738)
                  {
    - $elem723 = null;
    - $xfer += $input->readString($elem723);
    - $this->part_vals []= $elem723;
    + $elem739 = null;
    + $xfer += $input->readString($elem739);
    + $this->part_vals []= $elem739;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -21363,9 +21847,9 @@ class ThriftHiveMetastore_get_partitions_ps_args {
            {
              $output->writeListBegin(TType::STRING, count($this->part_vals));
              {
    - foreach ($this->part_vals as $iter724)
    + foreach ($this->part_vals as $iter740)
                {
    - $xfer += $output->writeString($iter724);
    + $xfer += $output->writeString($iter740);
                }
              }
              $output->writeListEnd();
    @@ -21459,15 +21943,15 @@ class ThriftHiveMetastore_get_partitions_ps_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size725 = 0;
    - $_etype728 = 0;
    - $xfer += $input->readListBegin($_etype728, $_size725);
    - for ($_i729 = 0; $_i729 < $_size725; ++$_i729)
    + $_size741 = 0;
    + $_etype744 = 0;
    + $xfer += $input->readListBegin($_etype744, $_size741);
    + for ($_i745 = 0; $_i745 < $_size741; ++$_i745)
                  {
    - $elem730 = null;
    - $elem730 = new \metastore\Partition();
    - $xfer += $elem730->read($input);
    - $this->success []= $elem730;
    + $elem746 = null;
    + $elem746 = new \metastore\Partition();
    + $xfer += $elem746->read($input);
    + $this->success []= $elem746;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -21511,9 +21995,9 @@ class ThriftHiveMetastore_get_partitions_ps_result {
            {
              $output->writeListBegin(TType::STRUCT, count($this->success));
              {
    - foreach ($this->success as $iter731)
    + foreach ($this->success as $iter747)
                {
    - $xfer += $iter731->write($output);
    + $xfer += $iter747->write($output);
                }
              }
              $output->writeListEnd();
    @@ -21660,14 +22144,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
              case 3:
                if ($ftype == TType::LST) {
                  $this->part_vals = array();
    - $_size732 = 0;
    - $_etype735 = 0;
    - $xfer += $input->readListBegin($_etype735, $_size732);
    - for ($_i736 = 0; $_i736 < $_size732; ++$_i736)
    + $_size748 = 0;
    + $_etype751 = 0;
    + $xfer += $input->readListBegin($_etype751, $_size748);
    + for ($_i752 = 0; $_i752 < $_size748; ++$_i752)
                  {
    - $elem737 = null;
    - $xfer += $input->readString($elem737);
    - $this->part_vals []= $elem737;
    + $elem753 = null;
    + $xfer += $input->readString($elem753);
    + $this->part_vals []= $elem753;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -21691,14 +22175,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
              case 6:
                if ($ftype == TType::LST) {
                  $this->group_names = array();
    - $_size738 = 0;
    - $_etype741 = 0;
    - $xfer += $input->readListBegin($_etype741, $_size738);
    - for ($_i742 = 0; $_i742 < $_size738; ++$_i742)
    + $_size754 = 0;
    + $_etype757 = 0;
    + $xfer += $input->readListBegin($_etype757, $_size754);
    + for ($_i758 = 0; $_i758 < $_size754; ++$_i758)
                  {
    - $elem743 = null;
    - $xfer += $input->readString($elem743);
    - $this->group_names []= $elem743;
    + $elem759 = null;
    + $xfer += $input->readString($elem759);
    + $this->group_names []= $elem759;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -21736,9 +22220,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
            {
              $output->writeListBegin(TType::STRING, count($this->part_vals));
              {
    - foreach ($this->part_vals as $iter744)
    + foreach ($this->part_vals as $iter760)
                {
    - $xfer += $output->writeString($iter744);
    + $xfer += $output->writeString($iter760);
                }
              }
              $output->writeListEnd();
    @@ -21763,9 +22247,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
            {
              $output->writeListBegin(TType::STRING, count($this->group_names));
              {
    - foreach ($this->group_names as $iter745)
    + foreach ($this->group_names as $iter761)
                {
    - $xfer += $output->writeString($iter745);
    + $xfer += $output->writeString($iter761);
                }
              }
              $output->writeListEnd();
    @@ -21854,15 +22338,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size746 = 0;
    - $_etype749 = 0;
    - $xfer += $input->readListBegin($_etype749, $_size746);
    - for ($_i750 = 0; $_i750 < $_size746; ++$_i750)
    + $_size762 = 0;
    + $_etype765 = 0;
    + $xfer += $input->readListBegin($_etype765, $_size762);
    + for ($_i766 = 0; $_i766 < $_size762; ++$_i766)
                  {
    - $elem751 = null;
    - $elem751 = new \metastore\Partition();
    - $xfer += $elem751->read($input);
    - $this->success []= $elem751;
    + $elem767 = null;
    + $elem767 = new \metastore\Partition();
    + $xfer += $elem767->read($input);
    + $this->success []= $elem767;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -21906,9 +22390,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
            {
              $output->writeListBegin(TType::STRUCT, count($this->success));
              {
    - foreach ($this->success as $iter752)
    + foreach ($this->success as $iter768)
                {
    - $xfer += $iter752->write($output);
    + $xfer += $iter768->write($output);
                }
              }
              $output->writeListEnd();
    @@ -22029,14 +22513,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
              case 3:
                if ($ftype == TType::LST) {
                  $this->part_vals = array();
    - $_size753 = 0;
    - $_etype756 = 0;
    - $xfer += $input->readListBegin($_etype756, $_size753);
    - for ($_i757 = 0; $_i757 < $_size753; ++$_i757)
    + $_size769 = 0;
    + $_etype772 = 0;
    + $xfer += $input->readListBegin($_etype772, $_size769);
    + for ($_i773 = 0; $_i773 < $_size769; ++$_i773)
                  {
    - $elem758 = null;
    - $xfer += $input->readString($elem758);
    - $this->part_vals []= $elem758;
    + $elem774 = null;
    + $xfer += $input->readString($elem774);
    + $this->part_vals []= $elem774;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -22081,9 +22565,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
            {
              $output->writeListBegin(TType::STRING, count($this->part_vals));
              {
    - foreach ($this->part_vals as $iter759)
    + foreach ($this->part_vals as $iter775)
                {
    - $xfer += $output->writeString($iter759);
    + $xfer += $output->writeString($iter775);
                }
              }
              $output->writeListEnd();
    @@ -22176,14 +22660,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size760 = 0;
    - $_etype763 = 0;
    - $xfer += $input->readListBegin($_etype763, $_size760);
    - for ($_i764 = 0; $_i764 < $_size760; ++$_i764)
    + $_size776 = 0;
    + $_etype779 = 0;
    + $xfer += $input->readListBegin($_etype779, $_size776);
    + for ($_i780 = 0; $_i780 < $_size776; ++$_i780)
                  {
    - $elem765 = null;
    - $xfer += $input->readString($elem765);
    - $this->success []= $elem765;
    + $elem781 = null;
    + $xfer += $input->readString($elem781);
    + $this->success []= $elem781;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -22227,9 +22711,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
            {
              $output->writeListBegin(TType::STRING, count($this->success));
              {
    - foreach ($this->success as $iter766)
    + foreach ($this->success as $iter782)
                {
    - $xfer += $output->writeString($iter766);
    + $xfer += $output->writeString($iter782);
                }
              }
              $output->writeListEnd();
    @@ -22472,15 +22956,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size767 = 0;
    - $_etype770 = 0;
    - $xfer += $input->readListBegin($_etype770, $_size767);
    - for ($_i771 = 0; $_i771 < $_size767; ++$_i771)
    + $_size783 = 0;
    + $_etype786 = 0;
    + $xfer += $input->readListBegin($_etype786, $_size783);
    + for ($_i787 = 0; $_i787 < $_size783; ++$_i787)
                  {
    - $elem772 = null;
    - $elem772 = new \metastore\Partition();
    - $xfer += $elem772->read($input);
    - $this->success []= $elem772;
    + $elem788 = null;
    + $elem788 = new \metastore\Partition();
    + $xfer += $elem788->read($input);
    + $this->success []= $elem788;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -22524,9 +23008,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
            {
              $output->writeListBegin(TType::STRUCT, count($this->success));
              {
    - foreach ($this->success as $iter773)
    + foreach ($this->success as $iter789)
                {
    - $xfer += $iter773->write($output);
    + $xfer += $iter789->write($output);
                }
              }
              $output->writeListEnd();
    @@ -22769,15 +23253,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size774 = 0;
    - $_etype777 = 0;
    - $xfer += $input->readListBegin($_etype777, $_size774);
    - for ($_i778 = 0; $_i778 < $_size774; ++$_i778)
    + $_size790 = 0;
    + $_etype793 = 0;
    + $xfer += $input->readListBegin($_etype793, $_size790);
    + for ($_i794 = 0; $_i794 < $_size790; ++$_i794)
                  {
    - $elem779 = null;
    - $elem779 = new \metastore\PartitionSpec();
    - $xfer += $elem779->read($input);
    - $this->success []= $elem779;
    + $elem795 = null;
    + $elem795 = new \metastore\PartitionSpec();
    + $xfer += $elem795->read($input);
    + $this->success []= $elem795;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -22821,9 +23305,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
            {
              $output->writeListBegin(TType::STRUCT, count($this->success));
              {
    - foreach ($this->success as $iter780)
    + foreach ($this->success as $iter796)
                {
    - $xfer += $iter780->write($output);
    + $xfer += $iter796->write($output);
                }
              }
              $output->writeListEnd();
    @@ -23143,14 +23627,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
              case 3:
                if ($ftype == TType::LST) {
                  $this->names = array();
    - $_size781 = 0;
    - $_etype784 = 0;
    - $xfer += $input->readListBegin($_etype784, $_size781);
    - for ($_i785 = 0; $_i785 < $_size781; ++$_i785)
    + $_size797 = 0;
    + $_etype800 = 0;
    + $xfer += $input->readListBegin($_etype800, $_size797);
    + for ($_i801 = 0; $_i801 < $_size797; ++$_i801)
                  {
    - $elem786 = null;
    - $xfer += $input->readString($elem786);
    - $this->names []= $elem786;
    + $elem802 = null;
    + $xfer += $input->readString($elem802);
    + $this->names []= $elem802;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -23188,9 +23672,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
            {
              $output->writeListBegin(TType::STRING, count($this->names));
              {
    - foreach ($this->names as $iter787)
    + foreach ($this->names as $iter803)
                {
    - $xfer += $output->writeString($iter787);
    + $xfer += $output->writeString($iter803);
                }
              }
              $output->writeListEnd();
    @@ -23279,15 +23763,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size788 = 0;
    - $_etype791 = 0;
    - $xfer += $input->readListBegin($_etype791, $_size788);
    - for ($_i792 = 0; $_i792 < $_size788; ++$_i792)
    + $_size804 = 0;
    + $_etype807 = 0;
    + $xfer += $input->readListBegin($_etype807, $_size804);
    + for ($_i808 = 0; $_i808 < $_size804; ++$_i808)
                  {
    - $elem793 = null;
    - $elem793 = new \metastore\Partition();
    - $xfer += $elem793->read($input);
    - $this->success []= $elem793;
    + $elem809 = null;
    + $elem809 = new \metastore\Partition();
    + $xfer += $elem809->read($input);
    + $this->success []= $elem809;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -23331,9 +23815,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
            {
              $output->writeListBegin(TType::STRUCT, count($this->success));
              {
    - foreach ($this->success as $iter794)
    + foreach ($this->success as $iter810)
                {
    - $xfer += $iter794->write($output);
    + $xfer += $iter810->write($output);
                }
              }
              $output->writeListEnd();
    @@ -23672,15 +24156,15 @@ class ThriftHiveMetastore_alter_partitions_args {
              case 3:
                if ($ftype == TType::LST) {
                  $this->new_parts = array();
    - $_size795 = 0;
    - $_etype798 = 0;
    - $xfer += $input->readListBegin($_etype798, $_size795);
    - for ($_i799 = 0; $_i799 < $_size795; ++$_i799)
    + $_size811 = 0;
    + $_etype814 = 0;
    + $xfer += $input->readListBegin($_etype814, $_size811);
    + for ($_i815 = 0; $_i815 < $_size811; ++$_i815)
                  {
    - $elem800 = null;
    - $elem800 = new \metastore\Partition();
    - $xfer += $elem800->read($input);
    - $this->new_parts []= $elem800;
    + $elem816 = null;
    + $elem816 = new \metastore\Partition();
    + $xfer += $elem816->read($input);
    + $this->new_parts []= $elem816;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -23718,9 +24202,9 @@ class ThriftHiveMetastore_alter_partitions_args {
            {
              $output->writeListBegin(TType::STRUCT, count($this->new_parts));
              {
    - foreach ($this->new_parts as $iter801)
    + foreach ($this->new_parts as $iter817)
                {
    - $xfer += $iter801->write($output);
    + $xfer += $iter817->write($output);
                }
              }
              $output->writeListEnd();
    @@ -24190,14 +24674,14 @@ class ThriftHiveMetastore_rename_partition_args {
              case 3:
                if ($ftype == TType::LST) {
                  $this->part_vals = array();
    - $_size802 = 0;
    - $_etype805 = 0;
    - $xfer += $input->readListBegin($_etype805, $_size802);
    - for ($_i806 = 0; $_i806 < $_size802; ++$_i806)
    + $_size818 = 0;
    + $_etype821 = 0;
    + $xfer += $input->readListBegin($_etype821, $_size818);
    + for ($_i822 = 0; $_i822 < $_size818; ++$_i822)
                  {
    - $elem807 = null;
    - $xfer += $input->readString($elem807);
    - $this->part_vals []= $elem807;
    + $elem823 = null;
    + $xfer += $input->readString($elem823);
    + $this->part_vals []= $elem823;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -24243,9 +24727,9 @@ class ThriftHiveMetastore_rename_partition_args {
            {
              $output->writeListBegin(TType::STRING, count($this->part_vals));
              {
    - foreach ($this->part_vals as $iter808)
    + foreach ($this->part_vals as $iter824)
                {
    - $xfer += $output->writeString($iter808);
    + $xfer += $output->writeString($iter824);
                }
              }
              $output->writeListEnd();
    @@ -24430,14 +24914,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args {
              case 1:
                if ($ftype == TType::LST) {
                  $this->part_vals = array();
    - $_size809 = 0;
    - $_etype812 = 0;
    - $xfer += $input->readListBegin($_etype812, $_size809);
    - for ($_i813 = 0; $_i813 < $_size809; ++$_i813)
    + $_size825 = 0;
    + $_etype828 = 0;
    + $xfer += $input->readListBegin($_etype828, $_size825);
    + for ($_i829 = 0; $_i829 < $_size825; ++$_i829)
                  {
    - $elem814 = null;
    - $xfer += $input->readString($elem814);
    - $this->part_vals []= $elem814;
    + $elem830 = null;
    + $xfer += $input->readString($elem830);
    + $this->part_vals []= $elem830;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -24472,9 +24956,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args {
            {
              $output->writeListBegin(TType::STRING, count($this->part_vals));
              {
    - foreach ($this->part_vals as $iter815)
    + foreach ($this->part_vals as $iter831)
                {
    - $xfer += $output->writeString($iter815);
    + $xfer += $output->writeString($iter831);
                }
              }
              $output->writeListEnd();
    @@ -24928,14 +25412,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size816 = 0;
    - $_etype819 = 0;
    - $xfer += $input->readListBegin($_etype819, $_size816);
    - for ($_i820 = 0; $_i820 < $_size816; ++$_i820)
    + $_size832 = 0;
    + $_etype835 = 0;
    + $xfer += $input->readListBegin($_etype835, $_size832);
    + for ($_i836 = 0; $_i836 < $_size832; ++$_i836)
                  {
    - $elem821 = null;
    - $xfer += $input->readString($elem821);
    - $this->success []= $elem821;
    + $elem837 = null;
    + $xfer += $input->readString($elem837);
    + $this->success []= $elem837;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -24971,9 +25455,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result {
            {
              $output->writeListBegin(TType::STRING, count($this->success));
              {
    - foreach ($this->success as $iter822)
    + foreach ($this->success as $iter838)
                {
    - $xfer += $output->writeString($iter822);
    + $xfer += $output->writeString($iter838);
                }
              }
              $output->writeListEnd();
    @@ -25133,17 +25617,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result {
              case 0:
                if ($ftype == TType::MAP) {
                  $this->success = array();
    - $_size823 = 0;
    - $_ktype824 = 0;
    - $_vtype825 = 0;
    - $xfer += $input->readMapBegin($_ktype824, $_vtype825, $_size823);
    - for ($_i827 = 0; $_i827 < $_size823; ++$_i827)
    + $_size839 = 0;
    + $_ktype840 = 0;
    + $_vtype841 = 0;
    + $xfer += $input->readMapBegin($_ktype840, $_vtype841, $_size839);
    + for ($_i843 = 0; $_i843 < $_size839; ++$_i843)
                  {
    - $key828 = '';
    - $val829 = '';
    - $xfer += $input->readString($key828);
    - $xfer += $input->readString($val829);
    - $this->success[$key828] = $val829;
    + $key844 = '';
    + $val845 = '';
    + $xfer += $input->readString($key844);
    + $xfer += $input->readString($val845);
    + $this->success[$key844] = $val845;
                  }
                  $xfer += $input->readMapEnd();
                } else {
    @@ -25179,10 +25663,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result {
            {
              $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success));
              {
    - foreach ($this->success as $kiter830 => $viter831)
    + foreach ($this->success as $kiter846 => $viter847)
                {
    - $xfer += $output->writeString($kiter830);
    - $xfer += $output->writeString($viter831);
    + $xfer += $output->writeString($kiter846);
    + $xfer += $output->writeString($viter847);
                }
              }
              $output->writeMapEnd();
    @@ -25302,17 +25786,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args {
              case 3:
                if ($ftype == TType::MAP) {
                  $this->part_vals = array();
    - $_size832 = 0;
    - $_ktype833 = 0;
    - $_vtype834 = 0;
    - $xfer += $input->readMapBegin($_ktype833, $_vtype834, $_size832);
    - for ($_i836 = 0; $_i836 < $_size832; ++$_i836)
    + $_size848 = 0;
    + $_ktype849 = 0;
    + $_vtype850 = 0;
    + $xfer += $input->readMapBegin($_ktype849, $_vtype850, $_size848);
    + for ($_i852 = 0; $_i852 < $_size848; ++$_i852)
                  {
    - $key837 = '';
    - $val838 = '';
    - $xfer += $input->readString($key837);
    - $xfer += $input->readString($val838);
    - $this->part_vals[$key837] = $val838;
    + $key853 = '';
    + $val854 = '';
    + $xfer += $input->readString($key853);
    + $xfer += $input->readString($val854);
    + $this->part_vals[$key853] = $val854;
                  }
                  $xfer += $input->readMapEnd();
                } else {
    @@ -25357,10 +25841,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args {
            {
              $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
              {
    - foreach ($this->part_vals as $kiter839 => $viter840)
    + foreach ($this->part_vals as $kiter855 => $viter856)
                {
    - $xfer += $output->writeString($kiter839);
    - $xfer += $output->writeString($viter840);
    + $xfer += $output->writeString($kiter855);
    + $xfer += $output->writeString($viter856);
                }
              }
              $output->writeMapEnd();
    @@ -25682,17 +26166,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args {
              case 3:
                if ($ftype == TType::MAP) {
                  $this->part_vals = array();
    - $_size841 = 0;
    - $_ktype842 = 0;
    - $_vtype843 = 0;
    - $xfer += $input->readMapBegin($_ktype842, $_vtype843, $_size841);
    - for ($_i845 = 0; $_i845 < $_size841; ++$_i845)
    + $_size857 = 0;
    + $_ktype858 = 0;
    + $_vtype859 = 0;
    + $xfer += $input->readMapBegin($_ktype858, $_vtype859, $_size857);
    + for ($_i861 = 0; $_i861 < $_size857; ++$_i861)
                  {
    - $key846 = '';
    - $val847 = '';
    - $xfer += $input->readString($key846);
    - $xfer += $input->readString($val847);
    - $this->part_vals[$key846] = $val847;
    + $key862 = '';
    + $val863 = '';
    + $xfer += $input->readString($key862);
    + $xfer += $input->readString($val863);
    + $this->part_vals[$key862] = $val863;
                  }
                  $xfer += $input->readMapEnd();
                } else {
    @@ -25737,10 +26221,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args {
            {
              $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals));
              {
    - foreach ($this->part_vals as $kiter848 => $viter849)
    + foreach ($this->part_vals as $kiter864 => $viter865)
                {
    - $xfer += $output->writeString($kiter848);
    - $xfer += $output->writeString($viter849);
    + $xfer += $output->writeString($kiter864);
    + $xfer += $output->writeString($viter865);
                }
              }
              $output->writeMapEnd();
    @@ -27214,15 +27698,15 @@ class ThriftHiveMetastore_get_indexes_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size850 = 0;
    - $_etype853 = 0;
    - $xfer += $input->readListBegin($_etype853, $_size850);
    - for ($_i854 = 0; $_i854 < $_size850; ++$_i854)
    + $_size866 = 0;
    + $_etype869 = 0;
    + $xfer += $input->readListBegin($_etype869, $_size866);
    + for ($_i870 = 0; $_i870 < $_size866; ++$_i870)
                  {
    - $elem855 = null;
    - $elem855 = new \metastore\Index();
    - $xfer += $elem855->read($input);
    - $this->success []= $elem855;
    + $elem871 = null;
    + $elem871 = new \metastore\Index();
    + $xfer += $elem871->read($input);
    + $this->success []= $elem871;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -27266,9 +27750,9 @@ class ThriftHiveMetastore_get_indexes_result {
            {
              $output->writeListBegin(TType::STRUCT, count($this->success));
              {
    - foreach ($this->success as $iter856)
    + foreach ($this->success as $iter872)
                {
    - $xfer += $iter856->write($output);
    + $xfer += $iter872->write($output);
                }
              }
              $output->writeListEnd();
    @@ -27475,14 +27959,14 @@ class ThriftHiveMetastore_get_index_names_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size857 = 0;
    - $_etype860 = 0;
    - $xfer += $input->readListBegin($_etype860, $_size857);
    - for ($_i861 = 0; $_i861 < $_size857; ++$_i861)
    + $_size873 = 0;
    + $_etype876 = 0;
    + $xfer += $input->readListBegin($_etype876, $_size873);
    + for ($_i877 = 0; $_i877 < $_size873; ++$_i877)
                  {
    - $elem862 = null;
    - $xfer += $input->readString($elem862);
    - $this->success []= $elem862;
    + $elem878 = null;
    + $xfer += $input->readString($elem878);
    + $this->success []= $elem878;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -27518,9 +28002,9 @@ class ThriftHiveMetastore_get_index_names_result {
            {
              $output->writeListBegin(TType::STRING, count($this->success));
              {
    - foreach ($this->success as $iter863)
    + foreach ($this->success as $iter879)
                {
    - $xfer += $output->writeString($iter863);
    + $xfer += $output->writeString($iter879);
                }
              }
              $output->writeListEnd();
    @@ -30994,14 +31478,14 @@ class ThriftHiveMetastore_get_functions_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size864 = 0;
    - $_etype867 = 0;
    - $xfer += $input->readListBegin($_etype867, $_size864);
    - for ($_i868 = 0; $_i868 < $_size864; ++$_i868)
    + $_size880 = 0;
    + $_etype883 = 0;
    + $xfer += $input->readListBegin($_etype883, $_size880);
    + for ($_i884 = 0; $_i884 < $_size880; ++$_i884)
                  {
    - $elem869 = null;
    - $xfer += $input->readString($elem869);
    - $this->success []= $elem869;
    + $elem885 = null;
    + $xfer += $input->readString($elem885);
    + $this->success []= $elem885;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -31037,9 +31521,9 @@ class ThriftHiveMetastore_get_functions_result {
            {
              $output->writeListBegin(TType::STRING, count($this->success));
              {
    - foreach ($this->success as $iter870)
    + foreach ($this->success as $iter886)
                {
    - $xfer += $output->writeString($iter870);
    + $xfer += $output->writeString($iter886);
                }
              }
              $output->writeListEnd();
    @@ -31908,14 +32392,14 @@ class ThriftHiveMetastore_get_role_names_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size871 = 0;
    - $_etype874 = 0;
    - $xfer += $input->readListBegin($_etype874, $_size871);
    - for ($_i875 = 0; $_i875 < $_size871; ++$_i875)
    + $_size887 = 0;
    + $_etype890 = 0;
    + $xfer += $input->readListBegin($_etype890, $_size887);
    + for ($_i891 = 0; $_i891 < $_size887; ++$_i891)
                  {
    - $elem876 = null;
    - $xfer += $input->readString($elem876);
    - $this->success []= $elem876;
    + $elem892 = null;
    + $xfer += $input->readString($elem892);
    + $this->success []= $elem892;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -31951,9 +32435,9 @@ class ThriftHiveMetastore_get_role_names_result {
            {
              $output->writeListBegin(TType::STRING, count($this->success));
              {
    - foreach ($this->success as $iter877)
    + foreach ($this->success as $iter893)
                {
    - $xfer += $output->writeString($iter877);
    + $xfer += $output->writeString($iter893);
                }
              }
              $output->writeListEnd();
    @@ -32644,15 +33128,15 @@ class ThriftHiveMetastore_list_roles_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size878 = 0;
    - $_etype881 = 0;
    - $xfer += $input->readListBegin($_etype881, $_size878);
    - for ($_i882 = 0; $_i882 < $_size878; ++$_i882)
    + $_size894 = 0;
    + $_etype897 = 0;
    + $xfer += $input->readListBegin($_etype897, $_size894);
    + for ($_i898 = 0; $_i898 < $_size894; ++$_i898)
                  {
    - $elem883 = null;
    - $elem883 = new \metastore\Role();
    - $xfer += $elem883->read($input);
    - $this->success []= $elem883;
    + $elem899 = null;
    + $elem899 = new \metastore\Role();
    + $xfer += $elem899->read($input);
    + $this->success []= $elem899;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -32688,9 +33172,9 @@ class ThriftHiveMetastore_list_roles_result {
            {
              $output->writeListBegin(TType::STRUCT, count($this->success));
              {
    - foreach ($this->success as $iter884)
    + foreach ($this->success as $iter900)
                {
    - $xfer += $iter884->write($output);
    + $xfer += $iter900->write($output);
                }
              }
              $output->writeListEnd();
    @@ -33352,14 +33836,14 @@ class ThriftHiveMetastore_get_privilege_set_args {
              case 3:
                if ($ftype == TType::LST) {
                  $this->group_names = array();
    - $_size885 = 0;
    - $_etype888 = 0;
    - $xfer += $input->readListBegin($_etype888, $_size885);
    - for ($_i889 = 0; $_i889 < $_size885; ++$_i889)
    + $_size901 = 0;
    + $_etype904 = 0;
    + $xfer += $input->readListBegin($_etype904, $_size901);
    + for ($_i905 = 0; $_i905 < $_size901; ++$_i905)
                  {
    - $elem890 = null;
    - $xfer += $input->readString($elem890);
    - $this->group_names []= $elem890;
    + $elem906 = null;
    + $xfer += $input->readString($elem906);
    + $this->group_names []= $elem906;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -33400,9 +33884,9 @@ class ThriftHiveMetastore_get_privilege_set_args {
            {
              $output->writeListBegin(TType::STRING, count($this->group_names));
              {
    - foreach ($this->group_names as $iter891)
    + foreach ($this->group_names as $iter907)
                {
    - $xfer += $output->writeString($iter891);
    + $xfer += $output->writeString($iter907);
                }
              }
              $output->writeListEnd();
    @@ -33710,15 +34194,15 @@ class ThriftHiveMetastore_list_privileges_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size892 = 0;
    - $_etype895 = 0;
    - $xfer += $input->readListBegin($_etype895, $_size892);
    - for ($_i896 = 0; $_i896 < $_size892; ++$_i896)
    + $_size908 = 0;
    + $_etype911 = 0;
    + $xfer += $input->readListBegin($_etype911, $_size908);
    + for ($_i912 = 0; $_i912 < $_size908; ++$_i912)
                  {
    - $elem897 = null;
    - $elem897 = new \metastore\HiveObjectPrivilege();
    - $xfer += $elem897->read($input);
    - $this->success []= $elem897;
    + $elem913 = null;
    + $elem913 = new \metastore\HiveObjectPrivilege();
    + $xfer += $elem913->read($input);
    + $this->success []= $elem913;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -33754,9 +34238,9 @@ class ThriftHiveMetastore_list_privileges_result {
            {
              $output->writeListBegin(TType::STRUCT, count($this->success));
              {
    - foreach ($this->success as $iter898)
    + foreach ($this->success as $iter914)
                {
    - $xfer += $iter898->write($output);
    + $xfer += $iter914->write($output);
                }
              }
              $output->writeListEnd();
    @@ -34388,14 +34872,14 @@ class ThriftHiveMetastore_set_ugi_args {
              case 2:
                if ($ftype == TType::LST) {
                  $this->group_names = array();
    - $_size899 = 0;
    - $_etype902 = 0;
    - $xfer += $input->readListBegin($_etype902, $_size899);
    - for ($_i903 = 0; $_i903 < $_size899; ++$_i903)
    + $_size915 = 0;
    + $_etype918 = 0;
    + $xfer += $input->readListBegin($_etype918, $_size915);
    + for ($_i919 = 0; $_i919 < $_size915; ++$_i919)
                  {
    - $elem904 = null;
    - $xfer += $input->readString($elem904);
    - $this->group_names []= $elem904;
    + $elem920 = null;
    + $xfer += $input->readString($elem920);
    + $this->group_names []= $elem920;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -34428,9 +34912,9 @@ class ThriftHiveMetastore_set_ugi_args {
            {
              $output->writeListBegin(TType::STRING, count($this->group_names));
              {
    - foreach ($this->group_names as $iter905)
    + foreach ($this->group_names as $iter921)
                {
    - $xfer += $output->writeString($iter905);
    + $xfer += $output->writeString($iter921);
                }
              }
              $output->writeListEnd();
    @@ -34506,14 +34990,14 @@ class ThriftHiveMetastore_set_ugi_result {
              case 0:
                if ($ftype == TType::LST) {
                  $this->success = array();
    - $_size906 = 0;
    - $_etype909 = 0;
    - $xfer += $input->readListBegin($_etype909, $_size906);
    - for ($_i910 = 0; $_i910 < $_size906; ++$_i910)
    + $_size922 = 0;
    + $_etype925 = 0;
    + $xfer += $input->readListBegin($_etype925, $_size922);
    + for ($_i926 = 0; $_i926 < $_size922; ++$_i926)
                  {
    - $elem911 = null;
    - $xfer += $input->readString($elem911);
    - $this->success []= $elem911;
    + $elem927 = null;
    + $xfer += $input->readString($elem927);
    + $this->success []= $elem927;
                  }
                  $xfer += $input->readListEnd();
                } else {
    @@ -34549,9 +35033,9 @@ class ThriftHiveMetastore_set_ugi_result {
            {
              $output->writeListBegin(TType::STRING, count($this->success));
              {
    - foreach ($this->success as $iter912)
    + foreach ($this->success as $iter928)
                {
    - $xfer += $output->writeString($iter912);
    + $xfer += $output->writeString($iter928);
                }
              }
              $output->writeListEnd();

    http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    index 8dba17b..65ba10e 100755
    --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    @@ -68,6 +68,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
        print(' DropPartitionsResult drop_partitions_req(DropPartitionsRequest req)')
        print(' Partition get_partition(string db_name, string tbl_name, part_vals)')
        print(' Partition exchange_partition( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)')
    + print(' exchange_partitions( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)')
        print(' Partition get_partition_with_auth(string db_name, string tbl_name, part_vals, string user_name, group_names)')
        print(' Partition get_partition_by_name(string db_name, string tbl_name, string part_name)')
        print(' get_partitions(string db_name, string tbl_name, i16 max_parts)')
    @@ -486,6 +487,12 @@ elif cmd == 'exchange_partition':
          sys.exit(1)
        pp.pprint(client.exchange_partition(eval(args[0]),args[1],args[2],args[3],args[4],))

    +elif cmd == 'exchange_partitions':
    + if len(args) != 5:
    + print('exchange_partitions requires 5 args')
    + sys.exit(1)
    + pp.pprint(client.exchange_partitions(eval(args[0]),args[1],args[2],args[3],args[4],))
    +
      elif cmd == 'get_partition_with_auth':
        if len(args) != 5:
          print('get_partition_with_auth requires 5 args')
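
    For context, a minimal sketch of how the newly generated exchange_partitions call could be exercised from the Python client; the metastore host/port, the partitionSpecs literal, and the database/table names below are illustrative assumptions, not values taken from this change:

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore

    # illustrative metastore endpoint; substitute the real host and port
    socket = TSocket.TSocket('localhost', 9083)
    transport = TTransport.TBufferedTransport(socket)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = ThriftHiveMetastore.Client(protocol)

    transport.open()
    # partitionSpecs maps partition column names to values; matching partitions
    # are moved from the source table to the destination table, and the
    # exchanged Partition objects come back as a list (placeholder names below).
    parts = client.exchange_partitions({'ds': '2015-11-06'},
                                       'source_db', 'source_table',
                                       'dest_db', 'dest_table')
    transport.close()
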
  • Jxiang at Nov 6, 2015 at 5:32 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/55a24f0a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
    ----------------------------------------------------------------------
    diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
    index a82c363..6a80db7 100644
    --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
    +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
    @@ -11672,6 +11672,402 @@ uint32_t ThriftHiveMetastore_exchange_partition_presult::read(::apache::thrift::
      }


    +ThriftHiveMetastore_exchange_partitions_args::~ThriftHiveMetastore_exchange_partitions_args() throw() {
    +}
    +
    +
    +uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::protocol::TProtocol* iprot) {
    +
    + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
    + uint32_t xfer = 0;
    + std::string fname;
    + ::apache::thrift::protocol::TType ftype;
    + int16_t fid;
    +
    + xfer += iprot->readStructBegin(fname);
    +
    + using ::apache::thrift::protocol::TProtocolException;
    +
    +
    + while (true)
    + {
    + xfer += iprot->readFieldBegin(fname, ftype, fid);
    + if (ftype == ::apache::thrift::protocol::T_STOP) {
    + break;
    + }
    + switch (fid)
    + {
    + case 1:
    + if (ftype == ::apache::thrift::protocol::T_MAP) {
    + {
    + this->partitionSpecs.clear();
    + uint32_t _size904;
    + ::apache::thrift::protocol::TType _ktype905;
    + ::apache::thrift::protocol::TType _vtype906;
    + xfer += iprot->readMapBegin(_ktype905, _vtype906, _size904);
    + uint32_t _i908;
    + for (_i908 = 0; _i908 < _size904; ++_i908)
    + {
    + std::string _key909;
    + xfer += iprot->readString(_key909);
    + std::string& _val910 = this->partitionSpecs[_key909];
    + xfer += iprot->readString(_val910);
    + }
    + xfer += iprot->readMapEnd();
    + }
    + this->__isset.partitionSpecs = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + case 2:
    + if (ftype == ::apache::thrift::protocol::T_STRING) {
    + xfer += iprot->readString(this->source_db);
    + this->__isset.source_db = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + case 3:
    + if (ftype == ::apache::thrift::protocol::T_STRING) {
    + xfer += iprot->readString(this->source_table_name);
    + this->__isset.source_table_name = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + case 4:
    + if (ftype == ::apache::thrift::protocol::T_STRING) {
    + xfer += iprot->readString(this->dest_db);
    + this->__isset.dest_db = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + case 5:
    + if (ftype == ::apache::thrift::protocol::T_STRING) {
    + xfer += iprot->readString(this->dest_table_name);
    + this->__isset.dest_table_name = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + default:
    + xfer += iprot->skip(ftype);
    + break;
    + }
    + xfer += iprot->readFieldEnd();
    + }
    +
    + xfer += iprot->readStructEnd();
    +
    + return xfer;
    +}
    +
    +uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
    + uint32_t xfer = 0;
    + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
    + xfer += oprot->writeStructBegin("ThriftHiveMetastore_exchange_partitions_args");
    +
    + xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
    + {
    + xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
    + std::map<std::string, std::string> ::const_iterator _iter911;
    + for (_iter911 = this->partitionSpecs.begin(); _iter911 != this->partitionSpecs.end(); ++_iter911)
    + {
    + xfer += oprot->writeString(_iter911->first);
    + xfer += oprot->writeString(_iter911->second);
    + }
    + xfer += oprot->writeMapEnd();
    + }
    + xfer += oprot->writeFieldEnd();
    +
    + xfer += oprot->writeFieldBegin("source_db", ::apache::thrift::protocol::T_STRING, 2);
    + xfer += oprot->writeString(this->source_db);
    + xfer += oprot->writeFieldEnd();
    +
    + xfer += oprot->writeFieldBegin("source_table_name", ::apache::thrift::protocol::T_STRING, 3);
    + xfer += oprot->writeString(this->source_table_name);
    + xfer += oprot->writeFieldEnd();
    +
    + xfer += oprot->writeFieldBegin("dest_db", ::apache::thrift::protocol::T_STRING, 4);
    + xfer += oprot->writeString(this->dest_db);
    + xfer += oprot->writeFieldEnd();
    +
    + xfer += oprot->writeFieldBegin("dest_table_name", ::apache::thrift::protocol::T_STRING, 5);
    + xfer += oprot->writeString(this->dest_table_name);
    + xfer += oprot->writeFieldEnd();
    +
    + xfer += oprot->writeFieldStop();
    + xfer += oprot->writeStructEnd();
    + return xfer;
    +}
    +
    +
    +ThriftHiveMetastore_exchange_partitions_pargs::~ThriftHiveMetastore_exchange_partitions_pargs() throw() {
    +}
    +
    +
    +uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
    + uint32_t xfer = 0;
    + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
    + xfer += oprot->writeStructBegin("ThriftHiveMetastore_exchange_partitions_pargs");
    +
    + xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
    + {
    + xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
    + std::map<std::string, std::string> ::const_iterator _iter912;
    + for (_iter912 = (*(this->partitionSpecs)).begin(); _iter912 != (*(this->partitionSpecs)).end(); ++_iter912)
    + {
    + xfer += oprot->writeString(_iter912->first);
    + xfer += oprot->writeString(_iter912->second);
    + }
    + xfer += oprot->writeMapEnd();
    + }
    + xfer += oprot->writeFieldEnd();
    +
    + xfer += oprot->writeFieldBegin("source_db", ::apache::thrift::protocol::T_STRING, 2);
    + xfer += oprot->writeString((*(this->source_db)));
    + xfer += oprot->writeFieldEnd();
    +
    + xfer += oprot->writeFieldBegin("source_table_name", ::apache::thrift::protocol::T_STRING, 3);
    + xfer += oprot->writeString((*(this->source_table_name)));
    + xfer += oprot->writeFieldEnd();
    +
    + xfer += oprot->writeFieldBegin("dest_db", ::apache::thrift::protocol::T_STRING, 4);
    + xfer += oprot->writeString((*(this->dest_db)));
    + xfer += oprot->writeFieldEnd();
    +
    + xfer += oprot->writeFieldBegin("dest_table_name", ::apache::thrift::protocol::T_STRING, 5);
    + xfer += oprot->writeString((*(this->dest_table_name)));
    + xfer += oprot->writeFieldEnd();
    +
    + xfer += oprot->writeFieldStop();
    + xfer += oprot->writeStructEnd();
    + return xfer;
    +}
    +
    +
    +ThriftHiveMetastore_exchange_partitions_result::~ThriftHiveMetastore_exchange_partitions_result() throw() {
    +}
    +
    +
    +uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift::protocol::TProtocol* iprot) {
    +
    + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
    + uint32_t xfer = 0;
    + std::string fname;
    + ::apache::thrift::protocol::TType ftype;
    + int16_t fid;
    +
    + xfer += iprot->readStructBegin(fname);
    +
    + using ::apache::thrift::protocol::TProtocolException;
    +
    +
    + while (true)
    + {
    + xfer += iprot->readFieldBegin(fname, ftype, fid);
    + if (ftype == ::apache::thrift::protocol::T_STOP) {
    + break;
    + }
    + switch (fid)
    + {
    + case 0:
    + if (ftype == ::apache::thrift::protocol::T_LIST) {
    + {
    + this->success.clear();
    + uint32_t _size913;
    + ::apache::thrift::protocol::TType _etype916;
    + xfer += iprot->readListBegin(_etype916, _size913);
    + this->success.resize(_size913);
    + uint32_t _i917;
    + for (_i917 = 0; _i917 < _size913; ++_i917)
    + {
    + xfer += this->success[_i917].read(iprot);
    + }
    + xfer += iprot->readListEnd();
    + }
    + this->__isset.success = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + case 1:
    + if (ftype == ::apache::thrift::protocol::T_STRUCT) {
    + xfer += this->o1.read(iprot);
    + this->__isset.o1 = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + case 2:
    + if (ftype == ::apache::thrift::protocol::T_STRUCT) {
    + xfer += this->o2.read(iprot);
    + this->__isset.o2 = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + case 3:
    + if (ftype == ::apache::thrift::protocol::T_STRUCT) {
    + xfer += this->o3.read(iprot);
    + this->__isset.o3 = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + case 4:
    + if (ftype == ::apache::thrift::protocol::T_STRUCT) {
    + xfer += this->o4.read(iprot);
    + this->__isset.o4 = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + default:
    + xfer += iprot->skip(ftype);
    + break;
    + }
    + xfer += iprot->readFieldEnd();
    + }
    +
    + xfer += iprot->readStructEnd();
    +
    + return xfer;
    +}
    +
    +uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
    +
    + uint32_t xfer = 0;
    +
    + xfer += oprot->writeStructBegin("ThriftHiveMetastore_exchange_partitions_result");
    +
    + if (this->__isset.success) {
    + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
    + {
    + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
    + std::vector<Partition> ::const_iterator _iter918;
    + for (_iter918 = this->success.begin(); _iter918 != this->success.end(); ++_iter918)
    + {
    + xfer += (*_iter918).write(oprot);
    + }
    + xfer += oprot->writeListEnd();
    + }
    + xfer += oprot->writeFieldEnd();
    + } else if (this->__isset.o1) {
    + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
    + xfer += this->o1.write(oprot);
    + xfer += oprot->writeFieldEnd();
    + } else if (this->__isset.o2) {
    + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
    + xfer += this->o2.write(oprot);
    + xfer += oprot->writeFieldEnd();
    + } else if (this->__isset.o3) {
    + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3);
    + xfer += this->o3.write(oprot);
    + xfer += oprot->writeFieldEnd();
    + } else if (this->__isset.o4) {
    + xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4);
    + xfer += this->o4.write(oprot);
    + xfer += oprot->writeFieldEnd();
    + }
    + xfer += oprot->writeFieldStop();
    + xfer += oprot->writeStructEnd();
    + return xfer;
    +}
    +
    +
    +ThriftHiveMetastore_exchange_partitions_presult::~ThriftHiveMetastore_exchange_partitions_presult() throw() {
    +}
    +
    +
    +uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
    +
    + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
    + uint32_t xfer = 0;
    + std::string fname;
    + ::apache::thrift::protocol::TType ftype;
    + int16_t fid;
    +
    + xfer += iprot->readStructBegin(fname);
    +
    + using ::apache::thrift::protocol::TProtocolException;
    +
    +
    + while (true)
    + {
    + xfer += iprot->readFieldBegin(fname, ftype, fid);
    + if (ftype == ::apache::thrift::protocol::T_STOP) {
    + break;
    + }
    + switch (fid)
    + {
    + case 0:
    + if (ftype == ::apache::thrift::protocol::T_LIST) {
    + {
    + (*(this->success)).clear();
    + uint32_t _size919;
    + ::apache::thrift::protocol::TType _etype922;
    + xfer += iprot->readListBegin(_etype922, _size919);
    + (*(this->success)).resize(_size919);
    + uint32_t _i923;
    + for (_i923 = 0; _i923 < _size919; ++_i923)
    + {
    + xfer += (*(this->success))[_i923].read(iprot);
    + }
    + xfer += iprot->readListEnd();
    + }
    + this->__isset.success = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + case 1:
    + if (ftype == ::apache::thrift::protocol::T_STRUCT) {
    + xfer += this->o1.read(iprot);
    + this->__isset.o1 = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + case 2:
    + if (ftype == ::apache::thrift::protocol::T_STRUCT) {
    + xfer += this->o2.read(iprot);
    + this->__isset.o2 = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + case 3:
    + if (ftype == ::apache::thrift::protocol::T_STRUCT) {
    + xfer += this->o3.read(iprot);
    + this->__isset.o3 = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + case 4:
    + if (ftype == ::apache::thrift::protocol::T_STRUCT) {
    + xfer += this->o4.read(iprot);
    + this->__isset.o4 = true;
    + } else {
    + xfer += iprot->skip(ftype);
    + }
    + break;
    + default:
    + xfer += iprot->skip(ftype);
    + break;
    + }
    + xfer += iprot->readFieldEnd();
    + }
    +
    + xfer += iprot->readStructEnd();
    +
    + return xfer;
    +}
    +
    +
      ThriftHiveMetastore_get_partition_with_auth_args::~ThriftHiveMetastore_get_partition_with_auth_args() throw() {
      }

    @@ -11717,14 +12113,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->part_vals.clear();
    - uint32_t _size904;
    - ::apache::thrift::protocol::TType _etype907;
    - xfer += iprot->readListBegin(_etype907, _size904);
    - this->part_vals.resize(_size904);
    - uint32_t _i908;
    - for (_i908 = 0; _i908 < _size904; ++_i908)
    + uint32_t _size924;
    + ::apache::thrift::protocol::TType _etype927;
    + xfer += iprot->readListBegin(_etype927, _size924);
    + this->part_vals.resize(_size924);
    + uint32_t _i928;
    + for (_i928 = 0; _i928 < _size924; ++_i928)
                  {
    - xfer += iprot->readString(this->part_vals[_i908]);
    + xfer += iprot->readString(this->part_vals[_i928]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -11745,14 +12141,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->group_names.clear();
    - uint32_t _size909;
    - ::apache::thrift::protocol::TType _etype912;
    - xfer += iprot->readListBegin(_etype912, _size909);
    - this->group_names.resize(_size909);
    - uint32_t _i913;
    - for (_i913 = 0; _i913 < _size909; ++_i913)
    + uint32_t _size929;
    + ::apache::thrift::protocol::TType _etype932;
    + xfer += iprot->readListBegin(_etype932, _size929);
    + this->group_names.resize(_size929);
    + uint32_t _i933;
    + for (_i933 = 0; _i933 < _size929; ++_i933)
                  {
    - xfer += iprot->readString(this->group_names[_i913]);
    + xfer += iprot->readString(this->group_names[_i933]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -11789,10 +12185,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
    - std::vector<std::string> ::const_iterator _iter914;
    - for (_iter914 = this->part_vals.begin(); _iter914 != this->part_vals.end(); ++_iter914)
    + std::vector<std::string> ::const_iterator _iter934;
    + for (_iter934 = this->part_vals.begin(); _iter934 != this->part_vals.end(); ++_iter934)
          {
    - xfer += oprot->writeString((*_iter914));
    + xfer += oprot->writeString((*_iter934));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -11805,10 +12201,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
        xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
    - std::vector<std::string> ::const_iterator _iter915;
    - for (_iter915 = this->group_names.begin(); _iter915 != this->group_names.end(); ++_iter915)
    + std::vector<std::string> ::const_iterator _iter935;
    + for (_iter935 = this->group_names.begin(); _iter935 != this->group_names.end(); ++_iter935)
          {
    - xfer += oprot->writeString((*_iter915));
    + xfer += oprot->writeString((*_iter935));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -11840,10 +12236,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
    - std::vector<std::string> ::const_iterator _iter916;
    - for (_iter916 = (*(this->part_vals)).begin(); _iter916 != (*(this->part_vals)).end(); ++_iter916)
    + std::vector<std::string> ::const_iterator _iter936;
    + for (_iter936 = (*(this->part_vals)).begin(); _iter936 != (*(this->part_vals)).end(); ++_iter936)
          {
    - xfer += oprot->writeString((*_iter916));
    + xfer += oprot->writeString((*_iter936));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -11856,10 +12252,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
        xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
    - std::vector<std::string> ::const_iterator _iter917;
    - for (_iter917 = (*(this->group_names)).begin(); _iter917 != (*(this->group_names)).end(); ++_iter917)
    + std::vector<std::string> ::const_iterator _iter937;
    + for (_iter937 = (*(this->group_names)).begin(); _iter937 != (*(this->group_names)).end(); ++_iter937)
          {
    - xfer += oprot->writeString((*_iter917));
    + xfer += oprot->writeString((*_iter937));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -12418,14 +12814,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->success.clear();
    - uint32_t _size918;
    - ::apache::thrift::protocol::TType _etype921;
    - xfer += iprot->readListBegin(_etype921, _size918);
    - this->success.resize(_size918);
    - uint32_t _i922;
    - for (_i922 = 0; _i922 < _size918; ++_i922)
    + uint32_t _size938;
    + ::apache::thrift::protocol::TType _etype941;
    + xfer += iprot->readListBegin(_etype941, _size938);
    + this->success.resize(_size938);
    + uint32_t _i942;
    + for (_i942 = 0; _i942 < _size938; ++_i942)
                  {
    - xfer += this->success[_i922].read(iprot);
    + xfer += this->success[_i942].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -12472,10 +12868,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
          {
            xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
    - std::vector<Partition> ::const_iterator _iter923;
    - for (_iter923 = this->success.begin(); _iter923 != this->success.end(); ++_iter923)
    + std::vector<Partition> ::const_iterator _iter943;
    + for (_iter943 = this->success.begin(); _iter943 != this->success.end(); ++_iter943)
            {
    - xfer += (*_iter923).write(oprot);
    + xfer += (*_iter943).write(oprot);
            }
            xfer += oprot->writeListEnd();
          }
    @@ -12524,14 +12920,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  (*(this->success)).clear();
    - uint32_t _size924;
    - ::apache::thrift::protocol::TType _etype927;
    - xfer += iprot->readListBegin(_etype927, _size924);
    - (*(this->success)).resize(_size924);
    - uint32_t _i928;
    - for (_i928 = 0; _i928 < _size924; ++_i928)
    + uint32_t _size944;
    + ::apache::thrift::protocol::TType _etype947;
    + xfer += iprot->readListBegin(_etype947, _size944);
    + (*(this->success)).resize(_size944);
    + uint32_t _i948;
    + for (_i948 = 0; _i948 < _size944; ++_i948)
                  {
    - xfer += (*(this->success))[_i928].read(iprot);
    + xfer += (*(this->success))[_i948].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -12630,14 +13026,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->group_names.clear();
    - uint32_t _size929;
    - ::apache::thrift::protocol::TType _etype932;
    - xfer += iprot->readListBegin(_etype932, _size929);
    - this->group_names.resize(_size929);
    - uint32_t _i933;
    - for (_i933 = 0; _i933 < _size929; ++_i933)
    + uint32_t _size949;
    + ::apache::thrift::protocol::TType _etype952;
    + xfer += iprot->readListBegin(_etype952, _size949);
    + this->group_names.resize(_size949);
    + uint32_t _i953;
    + for (_i953 = 0; _i953 < _size949; ++_i953)
                  {
    - xfer += iprot->readString(this->group_names[_i933]);
    + xfer += iprot->readString(this->group_names[_i953]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -12682,10 +13078,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri
        xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
    - std::vector<std::string> ::const_iterator _iter934;
    - for (_iter934 = this->group_names.begin(); _iter934 != this->group_names.end(); ++_iter934)
    + std::vector<std::string> ::const_iterator _iter954;
    + for (_iter954 = this->group_names.begin(); _iter954 != this->group_names.end(); ++_iter954)
          {
    - xfer += oprot->writeString((*_iter934));
    + xfer += oprot->writeString((*_iter954));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -12725,10 +13121,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr
        xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
    - std::vector<std::string> ::const_iterator _iter935;
    - for (_iter935 = (*(this->group_names)).begin(); _iter935 != (*(this->group_names)).end(); ++_iter935)
    + std::vector<std::string> ::const_iterator _iter955;
    + for (_iter955 = (*(this->group_names)).begin(); _iter955 != (*(this->group_names)).end(); ++_iter955)
          {
    - xfer += oprot->writeString((*_iter935));
    + xfer += oprot->writeString((*_iter955));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -12769,14 +13165,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->success.clear();
    - uint32_t _size936;
    - ::apache::thrift::protocol::TType _etype939;
    - xfer += iprot->readListBegin(_etype939, _size936);
    - this->success.resize(_size936);
    - uint32_t _i940;
    - for (_i940 = 0; _i940 < _size936; ++_i940)
    + uint32_t _size956;
    + ::apache::thrift::protocol::TType _etype959;
    + xfer += iprot->readListBegin(_etype959, _size956);
    + this->success.resize(_size956);
    + uint32_t _i960;
    + for (_i960 = 0; _i960 < _size956; ++_i960)
                  {
    - xfer += this->success[_i940].read(iprot);
    + xfer += this->success[_i960].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -12823,10 +13219,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
          {
            xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
    - std::vector<Partition> ::const_iterator _iter941;
    - for (_iter941 = this->success.begin(); _iter941 != this->success.end(); ++_iter941)
    + std::vector<Partition> ::const_iterator _iter961;
    + for (_iter961 = this->success.begin(); _iter961 != this->success.end(); ++_iter961)
            {
    - xfer += (*_iter941).write(oprot);
    + xfer += (*_iter961).write(oprot);
            }
            xfer += oprot->writeListEnd();
          }
    @@ -12875,14 +13271,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  (*(this->success)).clear();
    - uint32_t _size942;
    - ::apache::thrift::protocol::TType _etype945;
    - xfer += iprot->readListBegin(_etype945, _size942);
    - (*(this->success)).resize(_size942);
    - uint32_t _i946;
    - for (_i946 = 0; _i946 < _size942; ++_i946)
    + uint32_t _size962;
    + ::apache::thrift::protocol::TType _etype965;
    + xfer += iprot->readListBegin(_etype965, _size962);
    + (*(this->success)).resize(_size962);
    + uint32_t _i966;
    + for (_i966 = 0; _i966 < _size962; ++_i966)
                  {
    - xfer += (*(this->success))[_i946].read(iprot);
    + xfer += (*(this->success))[_i966].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -13060,14 +13456,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift:
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->success.clear();
    - uint32_t _size947;
    - ::apache::thrift::protocol::TType _etype950;
    - xfer += iprot->readListBegin(_etype950, _size947);
    - this->success.resize(_size947);
    - uint32_t _i951;
    - for (_i951 = 0; _i951 < _size947; ++_i951)
    + uint32_t _size967;
    + ::apache::thrift::protocol::TType _etype970;
    + xfer += iprot->readListBegin(_etype970, _size967);
    + this->success.resize(_size967);
    + uint32_t _i971;
    + for (_i971 = 0; _i971 < _size967; ++_i971)
                  {
    - xfer += this->success[_i951].read(iprot);
    + xfer += this->success[_i971].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -13114,10 +13510,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
          {
            xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
    - std::vector<PartitionSpec> ::const_iterator _iter952;
    - for (_iter952 = this->success.begin(); _iter952 != this->success.end(); ++_iter952)
    + std::vector<PartitionSpec> ::const_iterator _iter972;
    + for (_iter972 = this->success.begin(); _iter972 != this->success.end(); ++_iter972)
            {
    - xfer += (*_iter952).write(oprot);
    + xfer += (*_iter972).write(oprot);
            }
            xfer += oprot->writeListEnd();
          }
    @@ -13166,14 +13562,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  (*(this->success)).clear();
    - uint32_t _size953;
    - ::apache::thrift::protocol::TType _etype956;
    - xfer += iprot->readListBegin(_etype956, _size953);
    - (*(this->success)).resize(_size953);
    - uint32_t _i957;
    - for (_i957 = 0; _i957 < _size953; ++_i957)
    + uint32_t _size973;
    + ::apache::thrift::protocol::TType _etype976;
    + xfer += iprot->readListBegin(_etype976, _size973);
    + (*(this->success)).resize(_size973);
    + uint32_t _i977;
    + for (_i977 = 0; _i977 < _size973; ++_i977)
                  {
    - xfer += (*(this->success))[_i957].read(iprot);
    + xfer += (*(this->success))[_i977].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -13351,14 +13747,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift::
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->success.clear();
    - uint32_t _size958;
    - ::apache::thrift::protocol::TType _etype961;
    - xfer += iprot->readListBegin(_etype961, _size958);
    - this->success.resize(_size958);
    - uint32_t _i962;
    - for (_i962 = 0; _i962 < _size958; ++_i962)
    + uint32_t _size978;
    + ::apache::thrift::protocol::TType _etype981;
    + xfer += iprot->readListBegin(_etype981, _size978);
    + this->success.resize(_size978);
    + uint32_t _i982;
    + for (_i982 = 0; _i982 < _size978; ++_i982)
                  {
    - xfer += iprot->readString(this->success[_i962]);
    + xfer += iprot->readString(this->success[_i982]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -13397,10 +13793,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift:
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
          {
            xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
    - std::vector<std::string> ::const_iterator _iter963;
    - for (_iter963 = this->success.begin(); _iter963 != this->success.end(); ++_iter963)
    + std::vector<std::string> ::const_iterator _iter983;
    + for (_iter983 = this->success.begin(); _iter983 != this->success.end(); ++_iter983)
            {
    - xfer += oprot->writeString((*_iter963));
    + xfer += oprot->writeString((*_iter983));
            }
            xfer += oprot->writeListEnd();
          }
    @@ -13445,14 +13841,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift:
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  (*(this->success)).clear();
    - uint32_t _size964;
    - ::apache::thrift::protocol::TType _etype967;
    - xfer += iprot->readListBegin(_etype967, _size964);
    - (*(this->success)).resize(_size964);
    - uint32_t _i968;
    - for (_i968 = 0; _i968 < _size964; ++_i968)
    + uint32_t _size984;
    + ::apache::thrift::protocol::TType _etype987;
    + xfer += iprot->readListBegin(_etype987, _size984);
    + (*(this->success)).resize(_size984);
    + uint32_t _i988;
    + for (_i988 = 0; _i988 < _size984; ++_i988)
                  {
    - xfer += iprot->readString((*(this->success))[_i968]);
    + xfer += iprot->readString((*(this->success))[_i988]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -13527,14 +13923,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->part_vals.clear();
    - uint32_t _size969;
    - ::apache::thrift::protocol::TType _etype972;
    - xfer += iprot->readListBegin(_etype972, _size969);
    - this->part_vals.resize(_size969);
    - uint32_t _i973;
    - for (_i973 = 0; _i973 < _size969; ++_i973)
    + uint32_t _size989;
    + ::apache::thrift::protocol::TType _etype992;
    + xfer += iprot->readListBegin(_etype992, _size989);
    + this->part_vals.resize(_size989);
    + uint32_t _i993;
    + for (_i993 = 0; _i993 < _size989; ++_i993)
                  {
    - xfer += iprot->readString(this->part_vals[_i973]);
    + xfer += iprot->readString(this->part_vals[_i993]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -13579,10 +13975,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
    - std::vector<std::string> ::const_iterator _iter974;
    - for (_iter974 = this->part_vals.begin(); _iter974 != this->part_vals.end(); ++_iter974)
    + std::vector<std::string> ::const_iterator _iter994;
    + for (_iter994 = this->part_vals.begin(); _iter994 != this->part_vals.end(); ++_iter994)
          {
    - xfer += oprot->writeString((*_iter974));
    + xfer += oprot->writeString((*_iter994));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -13618,10 +14014,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
    - std::vector<std::string> ::const_iterator _iter975;
    - for (_iter975 = (*(this->part_vals)).begin(); _iter975 != (*(this->part_vals)).end(); ++_iter975)
    + std::vector<std::string> ::const_iterator _iter995;
    + for (_iter995 = (*(this->part_vals)).begin(); _iter995 != (*(this->part_vals)).end(); ++_iter995)
          {
    - xfer += oprot->writeString((*_iter975));
    + xfer += oprot->writeString((*_iter995));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -13666,14 +14062,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->success.clear();
    - uint32_t _size976;
    - ::apache::thrift::protocol::TType _etype979;
    - xfer += iprot->readListBegin(_etype979, _size976);
    - this->success.resize(_size976);
    - uint32_t _i980;
    - for (_i980 = 0; _i980 < _size976; ++_i980)
    + uint32_t _size996;
    + ::apache::thrift::protocol::TType _etype999;
    + xfer += iprot->readListBegin(_etype999, _size996);
    + this->success.resize(_size996);
    + uint32_t _i1000;
    + for (_i1000 = 0; _i1000 < _size996; ++_i1000)
                  {
    - xfer += this->success[_i980].read(iprot);
    + xfer += this->success[_i1000].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -13720,10 +14116,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
          {
            xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
    - std::vector<Partition> ::const_iterator _iter981;
    - for (_iter981 = this->success.begin(); _iter981 != this->success.end(); ++_iter981)
    + std::vector<Partition> ::const_iterator _iter1001;
    + for (_iter1001 = this->success.begin(); _iter1001 != this->success.end(); ++_iter1001)
            {
    - xfer += (*_iter981).write(oprot);
    + xfer += (*_iter1001).write(oprot);
            }
            xfer += oprot->writeListEnd();
          }
    @@ -13772,14 +14168,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  (*(this->success)).clear();
    - uint32_t _size982;
    - ::apache::thrift::protocol::TType _etype985;
    - xfer += iprot->readListBegin(_etype985, _size982);
    - (*(this->success)).resize(_size982);
    - uint32_t _i986;
    - for (_i986 = 0; _i986 < _size982; ++_i986)
    + uint32_t _size1002;
    + ::apache::thrift::protocol::TType _etype1005;
    + xfer += iprot->readListBegin(_etype1005, _size1002);
    + (*(this->success)).resize(_size1002);
    + uint32_t _i1006;
    + for (_i1006 = 0; _i1006 < _size1002; ++_i1006)
                  {
    - xfer += (*(this->success))[_i986].read(iprot);
    + xfer += (*(this->success))[_i1006].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -13862,14 +14258,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->part_vals.clear();
    - uint32_t _size987;
    - ::apache::thrift::protocol::TType _etype990;
    - xfer += iprot->readListBegin(_etype990, _size987);
    - this->part_vals.resize(_size987);
    - uint32_t _i991;
    - for (_i991 = 0; _i991 < _size987; ++_i991)
    + uint32_t _size1007;
    + ::apache::thrift::protocol::TType _etype1010;
    + xfer += iprot->readListBegin(_etype1010, _size1007);
    + this->part_vals.resize(_size1007);
    + uint32_t _i1011;
    + for (_i1011 = 0; _i1011 < _size1007; ++_i1011)
                  {
    - xfer += iprot->readString(this->part_vals[_i991]);
    + xfer += iprot->readString(this->part_vals[_i1011]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -13898,14 +14294,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->group_names.clear();
    - uint32_t _size992;
    - ::apache::thrift::protocol::TType _etype995;
    - xfer += iprot->readListBegin(_etype995, _size992);
    - this->group_names.resize(_size992);
    - uint32_t _i996;
    - for (_i996 = 0; _i996 < _size992; ++_i996)
    + uint32_t _size1012;
    + ::apache::thrift::protocol::TType _etype1015;
    + xfer += iprot->readListBegin(_etype1015, _size1012);
    + this->group_names.resize(_size1012);
    + uint32_t _i1016;
    + for (_i1016 = 0; _i1016 < _size1012; ++_i1016)
                  {
    - xfer += iprot->readString(this->group_names[_i996]);
    + xfer += iprot->readString(this->group_names[_i1016]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -13942,10 +14338,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
    - std::vector<std::string> ::const_iterator _iter997;
    - for (_iter997 = this->part_vals.begin(); _iter997 != this->part_vals.end(); ++_iter997)
    + std::vector<std::string> ::const_iterator _iter1017;
    + for (_iter1017 = this->part_vals.begin(); _iter1017 != this->part_vals.end(); ++_iter1017)
          {
    - xfer += oprot->writeString((*_iter997));
    + xfer += oprot->writeString((*_iter1017));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -13962,10 +14358,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t
        xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
    - std::vector<std::string> ::const_iterator _iter998;
    - for (_iter998 = this->group_names.begin(); _iter998 != this->group_names.end(); ++_iter998)
    + std::vector<std::string> ::const_iterator _iter1018;
    + for (_iter1018 = this->group_names.begin(); _iter1018 != this->group_names.end(); ++_iter1018)
          {
    - xfer += oprot->writeString((*_iter998));
    + xfer += oprot->writeString((*_iter1018));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -13997,10 +14393,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache::
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
    - std::vector<std::string> ::const_iterator _iter999;
    - for (_iter999 = (*(this->part_vals)).begin(); _iter999 != (*(this->part_vals)).end(); ++_iter999)
    + std::vector<std::string> ::const_iterator _iter1019;
    + for (_iter1019 = (*(this->part_vals)).begin(); _iter1019 != (*(this->part_vals)).end(); ++_iter1019)
          {
    - xfer += oprot->writeString((*_iter999));
    + xfer += oprot->writeString((*_iter1019));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -14017,10 +14413,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache::
        xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
    - std::vector<std::string> ::const_iterator _iter1000;
    - for (_iter1000 = (*(this->group_names)).begin(); _iter1000 != (*(this->group_names)).end(); ++_iter1000)
    + std::vector<std::string> ::const_iterator _iter1020;
    + for (_iter1020 = (*(this->group_names)).begin(); _iter1020 != (*(this->group_names)).end(); ++_iter1020)
          {
    - xfer += oprot->writeString((*_iter1000));
    + xfer += oprot->writeString((*_iter1020));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -14061,14 +14457,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache::
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->success.clear();
    - uint32_t _size1001;
    - ::apache::thrift::protocol::TType _etype1004;
    - xfer += iprot->readListBegin(_etype1004, _size1001);
    - this->success.resize(_size1001);
    - uint32_t _i1005;
    - for (_i1005 = 0; _i1005 < _size1001; ++_i1005)
    + uint32_t _size1021;
    + ::apache::thrift::protocol::TType _etype1024;
    + xfer += iprot->readListBegin(_etype1024, _size1021);
    + this->success.resize(_size1021);
    + uint32_t _i1025;
    + for (_i1025 = 0; _i1025 < _size1021; ++_i1025)
                  {
    - xfer += this->success[_i1005].read(iprot);
    + xfer += this->success[_i1025].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -14115,10 +14511,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache:
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
          {
            xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
    - std::vector<Partition> ::const_iterator _iter1006;
    - for (_iter1006 = this->success.begin(); _iter1006 != this->success.end(); ++_iter1006)
    + std::vector<Partition> ::const_iterator _iter1026;
    + for (_iter1026 = this->success.begin(); _iter1026 != this->success.end(); ++_iter1026)
            {
    - xfer += (*_iter1006).write(oprot);
    + xfer += (*_iter1026).write(oprot);
            }
            xfer += oprot->writeListEnd();
          }
    @@ -14167,14 +14563,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache:
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  (*(this->success)).clear();
    - uint32_t _size1007;
    - ::apache::thrift::protocol::TType _etype1010;
    - xfer += iprot->readListBegin(_etype1010, _size1007);
    - (*(this->success)).resize(_size1007);
    - uint32_t _i1011;
    - for (_i1011 = 0; _i1011 < _size1007; ++_i1011)
    + uint32_t _size1027;
    + ::apache::thrift::protocol::TType _etype1030;
    + xfer += iprot->readListBegin(_etype1030, _size1027);
    + (*(this->success)).resize(_size1027);
    + uint32_t _i1031;
    + for (_i1031 = 0; _i1031 < _size1027; ++_i1031)
                  {
    - xfer += (*(this->success))[_i1011].read(iprot);
    + xfer += (*(this->success))[_i1031].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -14257,14 +14653,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift:
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->part_vals.clear();
    - uint32_t _size1012;
    - ::apache::thrift::protocol::TType _etype1015;
    - xfer += iprot->readListBegin(_etype1015, _size1012);
    - this->part_vals.resize(_size1012);
    - uint32_t _i1016;
    - for (_i1016 = 0; _i1016 < _size1012; ++_i1016)
    + uint32_t _size1032;
    + ::apache::thrift::protocol::TType _etype1035;
    + xfer += iprot->readListBegin(_etype1035, _size1032);
    + this->part_vals.resize(_size1032);
    + uint32_t _i1036;
    + for (_i1036 = 0; _i1036 < _size1032; ++_i1036)
                  {
    - xfer += iprot->readString(this->part_vals[_i1016]);
    + xfer += iprot->readString(this->part_vals[_i1036]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -14309,10 +14705,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
    - std::vector<std::string> ::const_iterator _iter1017;
    - for (_iter1017 = this->part_vals.begin(); _iter1017 != this->part_vals.end(); ++_iter1017)
    + std::vector<std::string> ::const_iterator _iter1037;
    + for (_iter1037 = this->part_vals.begin(); _iter1037 != this->part_vals.end(); ++_iter1037)
          {
    - xfer += oprot->writeString((*_iter1017));
    + xfer += oprot->writeString((*_iter1037));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -14348,10 +14744,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
    - std::vector<std::string> ::const_iterator _iter1018;
    - for (_iter1018 = (*(this->part_vals)).begin(); _iter1018 != (*(this->part_vals)).end(); ++_iter1018)
    + std::vector<std::string> ::const_iterator _iter1038;
    + for (_iter1038 = (*(this->part_vals)).begin(); _iter1038 != (*(this->part_vals)).end(); ++_iter1038)
          {
    - xfer += oprot->writeString((*_iter1018));
    + xfer += oprot->writeString((*_iter1038));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -14396,14 +14792,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->success.clear();
    - uint32_t _size1019;
    - ::apache::thrift::protocol::TType _etype1022;
    - xfer += iprot->readListBegin(_etype1022, _size1019);
    - this->success.resize(_size1019);
    - uint32_t _i1023;
    - for (_i1023 = 0; _i1023 < _size1019; ++_i1023)
    + uint32_t _size1039;
    + ::apache::thrift::protocol::TType _etype1042;
    + xfer += iprot->readListBegin(_etype1042, _size1039);
    + this->success.resize(_size1039);
    + uint32_t _i1043;
    + for (_i1043 = 0; _i1043 < _size1039; ++_i1043)
                  {
    - xfer += iprot->readString(this->success[_i1023]);
    + xfer += iprot->readString(this->success[_i1043]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -14450,10 +14846,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
          {
            xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
    - std::vector<std::string> ::const_iterator _iter1024;
    - for (_iter1024 = this->success.begin(); _iter1024 != this->success.end(); ++_iter1024)
    + std::vector<std::string> ::const_iterator _iter1044;
    + for (_iter1044 = this->success.begin(); _iter1044 != this->success.end(); ++_iter1044)
            {
    - xfer += oprot->writeString((*_iter1024));
    + xfer += oprot->writeString((*_iter1044));
            }
            xfer += oprot->writeListEnd();
          }
    @@ -14502,14 +14898,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  (*(this->success)).clear();
    - uint32_t _size1025;
    - ::apache::thrift::protocol::TType _etype1028;
    - xfer += iprot->readListBegin(_etype1028, _size1025);
    - (*(this->success)).resize(_size1025);
    - uint32_t _i1029;
    - for (_i1029 = 0; _i1029 < _size1025; ++_i1029)
    + uint32_t _size1045;
    + ::apache::thrift::protocol::TType _etype1048;
    + xfer += iprot->readListBegin(_etype1048, _size1045);
    + (*(this->success)).resize(_size1045);
    + uint32_t _i1049;
    + for (_i1049 = 0; _i1049 < _size1045; ++_i1049)
                  {
    - xfer += iprot->readString((*(this->success))[_i1029]);
    + xfer += iprot->readString((*(this->success))[_i1049]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -14703,14 +15099,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->success.clear();
    - uint32_t _size1030;
    - ::apache::thrift::protocol::TType _etype1033;
    - xfer += iprot->readListBegin(_etype1033, _size1030);
    - this->success.resize(_size1030);
    - uint32_t _i1034;
    - for (_i1034 = 0; _i1034 < _size1030; ++_i1034)
    + uint32_t _size1050;
    + ::apache::thrift::protocol::TType _etype1053;
    + xfer += iprot->readListBegin(_etype1053, _size1050);
    + this->success.resize(_size1050);
    + uint32_t _i1054;
    + for (_i1054 = 0; _i1054 < _size1050; ++_i1054)
                  {
    - xfer += this->success[_i1034].read(iprot);
    + xfer += this->success[_i1054].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -14757,10 +15153,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
          {
            xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
    - std::vector<Partition> ::const_iterator _iter1035;
    - for (_iter1035 = this->success.begin(); _iter1035 != this->success.end(); ++_iter1035)
    + std::vector<Partition> ::const_iterator _iter1055;
    + for (_iter1055 = this->success.begin(); _iter1055 != this->success.end(); ++_iter1055)
            {
    - xfer += (*_iter1035).write(oprot);
    + xfer += (*_iter1055).write(oprot);
            }
            xfer += oprot->writeListEnd();
          }
    @@ -14809,14 +15205,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  (*(this->success)).clear();
    - uint32_t _size1036;
    - ::apache::thrift::protocol::TType _etype1039;
    - xfer += iprot->readListBegin(_etype1039, _size1036);
    - (*(this->success)).resize(_size1036);
    - uint32_t _i1040;
    - for (_i1040 = 0; _i1040 < _size1036; ++_i1040)
    + uint32_t _size1056;
    + ::apache::thrift::protocol::TType _etype1059;
    + xfer += iprot->readListBegin(_etype1059, _size1056);
    + (*(this->success)).resize(_size1056);
    + uint32_t _i1060;
    + for (_i1060 = 0; _i1060 < _size1056; ++_i1060)
                  {
    - xfer += (*(this->success))[_i1040].read(iprot);
    + xfer += (*(this->success))[_i1060].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -15010,14 +15406,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->success.clear();
    - uint32_t _size1041;
    - ::apache::thrift::protocol::TType _etype1044;
    - xfer += iprot->readListBegin(_etype1044, _size1041);
    - this->success.resize(_size1041);
    - uint32_t _i1045;
    - for (_i1045 = 0; _i1045 < _size1041; ++_i1045)
    + uint32_t _size1061;
    + ::apache::thrift::protocol::TType _etype1064;
    + xfer += iprot->readListBegin(_etype1064, _size1061);
    + this->success.resize(_size1061);
    + uint32_t _i1065;
    + for (_i1065 = 0; _i1065 < _size1061; ++_i1065)
                  {
    - xfer += this->success[_i1045].read(iprot);
    + xfer += this->success[_i1065].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -15064,10 +15460,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
          {
            xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
    - std::vector<PartitionSpec> ::const_iterator _iter1046;
    - for (_iter1046 = this->success.begin(); _iter1046 != this->success.end(); ++_iter1046)
    + std::vector<PartitionSpec> ::const_iterator _iter1066;
    + for (_iter1066 = this->success.begin(); _iter1066 != this->success.end(); ++_iter1066)
            {
    - xfer += (*_iter1046).write(oprot);
    + xfer += (*_iter1066).write(oprot);
            }
            xfer += oprot->writeListEnd();
          }
    @@ -15116,14 +15512,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  (*(this->success)).clear();
    - uint32_t _size1047;
    - ::apache::thrift::protocol::TType _etype1050;
    - xfer += iprot->readListBegin(_etype1050, _size1047);
    - (*(this->success)).resize(_size1047);
    - uint32_t _i1051;
    - for (_i1051 = 0; _i1051 < _size1047; ++_i1051)
    + uint32_t _size1067;
    + ::apache::thrift::protocol::TType _etype1070;
    + xfer += iprot->readListBegin(_etype1070, _size1067);
    + (*(this->success)).resize(_size1067);
    + uint32_t _i1071;
    + for (_i1071 = 0; _i1071 < _size1067; ++_i1071)
                  {
    - xfer += (*(this->success))[_i1051].read(iprot);
    + xfer += (*(this->success))[_i1071].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -15433,14 +15829,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->names.clear();
    - uint32_t _size1052;
    - ::apache::thrift::protocol::TType _etype1055;
    - xfer += iprot->readListBegin(_etype1055, _size1052);
    - this->names.resize(_size1052);
    - uint32_t _i1056;
    - for (_i1056 = 0; _i1056 < _size1052; ++_i1056)
    + uint32_t _size1072;
    + ::apache::thrift::protocol::TType _etype1075;
    + xfer += iprot->readListBegin(_etype1075, _size1072);
    + this->names.resize(_size1072);
    + uint32_t _i1076;
    + for (_i1076 = 0; _i1076 < _size1072; ++_i1076)
                  {
    - xfer += iprot->readString(this->names[_i1056]);
    + xfer += iprot->readString(this->names[_i1076]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -15477,10 +15873,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif
        xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->names.size()));
    - std::vector<std::string> ::const_iterator _iter1057;
    - for (_iter1057 = this->names.begin(); _iter1057 != this->names.end(); ++_iter1057)
    + std::vector<std::string> ::const_iterator _iter1077;
    + for (_iter1077 = this->names.begin(); _iter1077 != this->names.end(); ++_iter1077)
          {
    - xfer += oprot->writeString((*_iter1057));
    + xfer += oprot->writeString((*_iter1077));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -15512,10 +15908,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri
        xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->names)).size()));
    - std::vector<std::string> ::const_iterator _iter1058;
    - for (_iter1058 = (*(this->names)).begin(); _iter1058 != (*(this->names)).end(); ++_iter1058)
    + std::vector<std::string> ::const_iterator _iter1078;
    + for (_iter1078 = (*(this->names)).begin(); _iter1078 != (*(this->names)).end(); ++_iter1078)
          {
    - xfer += oprot->writeString((*_iter1058));
    + xfer += oprot->writeString((*_iter1078));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -15556,14 +15952,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->success.clear();
    - uint32_t _size1059;
    - ::apache::thrift::protocol::TType _etype1062;
    - xfer += iprot->readListBegin(_etype1062, _size1059);
    - this->success.resize(_size1059);
    - uint32_t _i1063;
    - for (_i1063 = 0; _i1063 < _size1059; ++_i1063)
    + uint32_t _size1079;
    + ::apache::thrift::protocol::TType _etype1082;
    + xfer += iprot->readListBegin(_etype1082, _size1079);
    + this->success.resize(_size1079);
    + uint32_t _i1083;
    + for (_i1083 = 0; _i1083 < _size1079; ++_i1083)
                  {
    - xfer += this->success[_i1063].read(iprot);
    + xfer += this->success[_i1083].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -15610,10 +16006,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
          {
            xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
    - std::vector<Partition> ::const_iterator _iter1064;
    - for (_iter1064 = this->success.begin(); _iter1064 != this->success.end(); ++_iter1064)
    + std::vector<Partition> ::const_iterator _iter1084;
    + for (_iter1084 = this->success.begin(); _iter1084 != this->success.end(); ++_iter1084)
            {
    - xfer += (*_iter1064).write(oprot);
    + xfer += (*_iter1084).write(oprot);
            }
            xfer += oprot->writeListEnd();
          }
    @@ -15662,14 +16058,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  (*(this->success)).clear();
    - uint32_t _size1065;
    - ::apache::thrift::protocol::TType _etype1068;
    - xfer += iprot->readListBegin(_etype1068, _size1065);
    - (*(this->success)).resize(_size1065);
    - uint32_t _i1069;
    - for (_i1069 = 0; _i1069 < _size1065; ++_i1069)
    + uint32_t _size1085;
    + ::apache::thrift::protocol::TType _etype1088;
    + xfer += iprot->readListBegin(_etype1088, _size1085);
    + (*(this->success)).resize(_size1085);
    + uint32_t _i1089;
    + for (_i1089 = 0; _i1089 < _size1085; ++_i1089)
                  {
    - xfer += (*(this->success))[_i1069].read(iprot);
    + xfer += (*(this->success))[_i1089].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -15991,14 +16387,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->new_parts.clear();
    - uint32_t _size1070;
    - ::apache::thrift::protocol::TType _etype1073;
    - xfer += iprot->readListBegin(_etype1073, _size1070);
    - this->new_parts.resize(_size1070);
    - uint32_t _i1074;
    - for (_i1074 = 0; _i1074 < _size1070; ++_i1074)
    + uint32_t _size1090;
    + ::apache::thrift::protocol::TType _etype1093;
    + xfer += iprot->readListBegin(_etype1093, _size1090);
    + this->new_parts.resize(_size1090);
    + uint32_t _i1094;
    + for (_i1094 = 0; _i1094 < _size1090; ++_i1094)
                  {
    - xfer += this->new_parts[_i1074].read(iprot);
    + xfer += this->new_parts[_i1094].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -16035,10 +16431,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot
        xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
    - std::vector<Partition> ::const_iterator _iter1075;
    - for (_iter1075 = this->new_parts.begin(); _iter1075 != this->new_parts.end(); ++_iter1075)
    + std::vector<Partition> ::const_iterator _iter1095;
    + for (_iter1095 = this->new_parts.begin(); _iter1095 != this->new_parts.end(); ++_iter1095)
          {
    - xfer += (*_iter1075).write(oprot);
    + xfer += (*_iter1095).write(oprot);
          }
          xfer += oprot->writeListEnd();
        }
    @@ -16070,10 +16466,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro
        xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
    - std::vector<Partition> ::const_iterator _iter1076;
    - for (_iter1076 = (*(this->new_parts)).begin(); _iter1076 != (*(this->new_parts)).end(); ++_iter1076)
    + std::vector<Partition> ::const_iterator _iter1096;
    + for (_iter1096 = (*(this->new_parts)).begin(); _iter1096 != (*(this->new_parts)).end(); ++_iter1096)
          {
    - xfer += (*_iter1076).write(oprot);
    + xfer += (*_iter1096).write(oprot);
          }
          xfer += oprot->writeListEnd();
        }
    @@ -16513,14 +16909,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->part_vals.clear();
    - uint32_t _size1077;
    - ::apache::thrift::protocol::TType _etype1080;
    - xfer += iprot->readListBegin(_etype1080, _size1077);
    - this->part_vals.resize(_size1077);
    - uint32_t _i1081;
    - for (_i1081 = 0; _i1081 < _size1077; ++_i1081)
    + uint32_t _size1097;
    + ::apache::thrift::protocol::TType _etype1100;
    + xfer += iprot->readListBegin(_etype1100, _size1097);
    + this->part_vals.resize(_size1097);
    + uint32_t _i1101;
    + for (_i1101 = 0; _i1101 < _size1097; ++_i1101)
                  {
    - xfer += iprot->readString(this->part_vals[_i1081]);
    + xfer += iprot->readString(this->part_vals[_i1101]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -16565,10 +16961,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
    - std::vector<std::string> ::const_iterator _iter1082;
    - for (_iter1082 = this->part_vals.begin(); _iter1082 != this->part_vals.end(); ++_iter1082)
    + std::vector<std::string> ::const_iterator _iter1102;
    + for (_iter1102 = this->part_vals.begin(); _iter1102 != this->part_vals.end(); ++_iter1102)
          {
    - xfer += oprot->writeString((*_iter1082));
    + xfer += oprot->writeString((*_iter1102));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -16604,10 +17000,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
    - std::vector<std::string> ::const_iterator _iter1083;
    - for (_iter1083 = (*(this->part_vals)).begin(); _iter1083 != (*(this->part_vals)).end(); ++_iter1083)
    + std::vector<std::string> ::const_iterator _iter1103;
    + for (_iter1103 = (*(this->part_vals)).begin(); _iter1103 != (*(this->part_vals)).end(); ++_iter1103)
          {
    - xfer += oprot->writeString((*_iter1083));
    + xfer += oprot->writeString((*_iter1103));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -16780,14 +17176,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->part_vals.clear();
    - uint32_t _size1084;
    - ::apache::thrift::protocol::TType _etype1087;
    - xfer += iprot->readListBegin(_etype1087, _size1084);
    - this->part_vals.resize(_size1084);
    - uint32_t _i1088;
    - for (_i1088 = 0; _i1088 < _size1084; ++_i1088)
    + uint32_t _size1104;
    + ::apache::thrift::protocol::TType _etype1107;
    + xfer += iprot->readListBegin(_etype1107, _size1104);
    + this->part_vals.resize(_size1104);
    + uint32_t _i1108;
    + for (_i1108 = 0; _i1108 < _size1104; ++_i1108)
                  {
    - xfer += iprot->readString(this->part_vals[_i1088]);
    + xfer += iprot->readString(this->part_vals[_i1108]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -16824,10 +17220,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
    - std::vector<std::string> ::const_iterator _iter1089;
    - for (_iter1089 = this->part_vals.begin(); _iter1089 != this->part_vals.end(); ++_iter1089)
    + std::vector<std::string> ::const_iterator _iter1109;
    + for (_iter1109 = this->part_vals.begin(); _iter1109 != this->part_vals.end(); ++_iter1109)
          {
    - xfer += oprot->writeString((*_iter1089));
    + xfer += oprot->writeString((*_iter1109));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -16855,10 +17251,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(::
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1);
        {
          xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
    - std::vector<std::string> ::const_iterator _iter1090;
    - for (_iter1090 = (*(this->part_vals)).begin(); _iter1090 != (*(this->part_vals)).end(); ++_iter1090)
    + std::vector<std::string> ::const_iterator _iter1110;
    + for (_iter1110 = (*(this->part_vals)).begin(); _iter1110 != (*(this->part_vals)).end(); ++_iter1110)
          {
    - xfer += oprot->writeString((*_iter1090));
    + xfer += oprot->writeString((*_iter1110));
          }
          xfer += oprot->writeListEnd();
        }
    @@ -17333,14 +17729,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->success.clear();
    - uint32_t _size1091;
    - ::apache::thrift::protocol::TType _etype1094;
    - xfer += iprot->readListBegin(_etype1094, _size1091);
    - this->success.resize(_size1091);
    - uint32_t _i1095;
    - for (_i1095 = 0; _i1095 < _size1091; ++_i1095)
    + uint32_t _size1111;
    + ::apache::thrift::protocol::TType _etype1114;
    + xfer += iprot->readListBegin(_etype1114, _size1111);
    + this->success.resize(_size1111);
    + uint32_t _i1115;
    + for (_i1115 = 0; _i1115 < _size1111; ++_i1115)
                  {
    - xfer += iprot->readString(this->success[_i1095]);
    + xfer += iprot->readString(this->success[_i1115]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -17379,10 +17775,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
          {
            xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
    - std::vector<std::string> ::const_iterator _iter1096;
    - for (_iter1096 = this->success.begin(); _iter1096 != this->success.end(); ++_iter1096)
    + std::vector<std::string> ::const_iterator _iter1116;
    + for (_iter1116 = this->success.begin(); _iter1116 != this->success.end(); ++_iter1116)
            {
    - xfer += oprot->writeString((*_iter1096));
    + xfer += oprot->writeString((*_iter1116));
            }
            xfer += oprot->writeListEnd();
          }
    @@ -17427,14 +17823,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  (*(this->success)).clear();
    - uint32_t _size1097;
    - ::apache::thrift::protocol::TType _etype1100;
    - xfer += iprot->readListBegin(_etype1100, _size1097);
    - (*(this->success)).resize(_size1097);
    - uint32_t _i1101;
    - for (_i1101 = 0; _i1101 < _size1097; ++_i1101)
    + uint32_t _size1117;
    + ::apache::thrift::protocol::TType _etype1120;
    + xfer += iprot->readListBegin(_etype1120, _size1117);
    + (*(this->success)).resize(_size1117);
    + uint32_t _i1121;
    + for (_i1121 = 0; _i1121 < _size1117; ++_i1121)
                  {
    - xfer += iprot->readString((*(this->success))[_i1101]);
    + xfer += iprot->readString((*(this->success))[_i1121]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -17572,17 +17968,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif
              if (ftype == ::apache::thrift::protocol::T_MAP) {
                {
                  this->success.clear();
    - uint32_t _size1102;
    - ::apache::thrift::protocol::TType _ktype1103;
    - ::apache::thrift::protocol::TType _vtype1104;
    - xfer += iprot->readMapBegin(_ktype1103, _vtype1104, _size1102);
    - uint32_t _i1106;
    - for (_i1106 = 0; _i1106 < _size1102; ++_i1106)
    + uint32_t _size1122;
    + ::apache::thrift::protocol::TType _ktype1123;
    + ::apache::thrift::protocol::TType _vtype1124;
    + xfer += iprot->readMapBegin(_ktype1123, _vtype1124, _size1122);
    + uint32_t _i1126;
    + for (_i1126 = 0; _i1126 < _size1122; ++_i1126)
                  {
    - std::string _key1107;
    - xfer += iprot->readString(_key1107);
    - std::string& _val1108 = this->success[_key1107];
    - xfer += iprot->readString(_val1108);
    + std::string _key1127;
    + xfer += iprot->readString(_key1127);
    + std::string& _val1128 = this->success[_key1127];
    + xfer += iprot->readString(_val1128);
                  }
                  xfer += iprot->readMapEnd();
                }
    @@ -17621,11 +18017,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
          {
            xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
    - std::map<std::string, std::string> ::const_iterator _iter1109;
    - for (_iter1109 = this->success.begin(); _iter1109 != this->success.end(); ++_iter1109)
    + std::map<std::string, std::string> ::const_iterator _iter1129;
    + for (_iter1129 = this->success.begin(); _iter1129 != this->success.end(); ++_iter1129)
            {
    - xfer += oprot->writeString(_iter1109->first);
    - xfer += oprot->writeString(_iter1109->second);
    + xfer += oprot->writeString(_iter1129->first);
    + xfer += oprot->writeString(_iter1129->second);
            }
            xfer += oprot->writeMapEnd();
          }
    @@ -17670,17 +18066,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri
              if (ftype == ::apache::thrift::protocol::T_MAP) {
                {
                  (*(this->success)).clear();
    - uint32_t _size1110;
    - ::apache::thrift::protocol::TType _ktype1111;
    - ::apache::thrift::protocol::TType _vtype1112;
    - xfer += iprot->readMapBegin(_ktype1111, _vtype1112, _size1110);
    - uint32_t _i1114;
    - for (_i1114 = 0; _i1114 < _size1110; ++_i1114)
    + uint32_t _size1130;
    + ::apache::thrift::protocol::TType _ktype1131;
    + ::apache::thrift::protocol::TType _vtype1132;
    + xfer += iprot->readMapBegin(_ktype1131, _vtype1132, _size1130);
    + uint32_t _i1134;
    + for (_i1134 = 0; _i1134 < _size1130; ++_i1134)
                  {
    - std::string _key1115;
    - xfer += iprot->readString(_key1115);
    - std::string& _val1116 = (*(this->success))[_key1115];
    - xfer += iprot->readString(_val1116);
    + std::string _key1135;
    + xfer += iprot->readString(_key1135);
    + std::string& _val1136 = (*(this->success))[_key1135];
    + xfer += iprot->readString(_val1136);
                  }
                  xfer += iprot->readMapEnd();
                }
    @@ -17755,17 +18151,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift::
              if (ftype == ::apache::thrift::protocol::T_MAP) {
                {
                  this->part_vals.clear();
    - uint32_t _size1117;
    - ::apache::thrift::protocol::TType _ktype1118;
    - ::apache::thrift::protocol::TType _vtype1119;
    - xfer += iprot->readMapBegin(_ktype1118, _vtype1119, _size1117);
    - uint32_t _i1121;
    - for (_i1121 = 0; _i1121 < _size1117; ++_i1121)
    + uint32_t _size1137;
    + ::apache::thrift::protocol::TType _ktype1138;
    + ::apache::thrift::protocol::TType _vtype1139;
    + xfer += iprot->readMapBegin(_ktype1138, _vtype1139, _size1137);
    + uint32_t _i1141;
    + for (_i1141 = 0; _i1141 < _size1137; ++_i1141)
                  {
    - std::string _key1122;
    - xfer += iprot->readString(_key1122);
    - std::string& _val1123 = this->part_vals[_key1122];
    - xfer += iprot->readString(_val1123);
    + std::string _key1142;
    + xfer += iprot->readString(_key1142);
    + std::string& _val1143 = this->part_vals[_key1142];
    + xfer += iprot->readString(_val1143);
                  }
                  xfer += iprot->readMapEnd();
                }
    @@ -17776,9 +18172,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift::
              break;
            case 4:
              if (ftype == ::apache::thrift::protocol::T_I32) {
    - int32_t ecast1124;
    - xfer += iprot->readI32(ecast1124);
    - this->eventType = (PartitionEventType::type)ecast1124;
    + int32_t ecast1144;
    + xfer += iprot->readI32(ecast1144);
    + this->eventType = (PartitionEventType::type)ecast1144;
                this->__isset.eventType = true;
              } else {
                xfer += iprot->skip(ftype);
    @@ -17812,11 +18208,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift:
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3);
        {
          xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
    - std::map<std::string, std::string> ::const_iterator _iter1125;
    - for (_iter1125 = this->part_vals.begin(); _iter1125 != this->part_vals.end(); ++_iter1125)
    + std::map<std::string, std::string> ::const_iterator _iter1145;
    + for (_iter1145 = this->part_vals.begin(); _iter1145 != this->part_vals.end(); ++_iter1145)
          {
    - xfer += oprot->writeString(_iter1125->first);
    - xfer += oprot->writeString(_iter1125->second);
    + xfer += oprot->writeString(_iter1145->first);
    + xfer += oprot->writeString(_iter1145->second);
          }
          xfer += oprot->writeMapEnd();
        }
    @@ -17852,11 +18248,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3);
        {
          xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
    - std::map<std::string, std::string> ::const_iterator _iter1126;
    - for (_iter1126 = (*(this->part_vals)).begin(); _iter1126 != (*(this->part_vals)).end(); ++_iter1126)
    + std::map<std::string, std::string> ::const_iterator _iter1146;
    + for (_iter1146 = (*(this->part_vals)).begin(); _iter1146 != (*(this->part_vals)).end(); ++_iter1146)
          {
    - xfer += oprot->writeString(_iter1126->first);
    - xfer += oprot->writeString(_iter1126->second);
    + xfer += oprot->writeString(_iter1146->first);
    + xfer += oprot->writeString(_iter1146->second);
          }
          xfer += oprot->writeMapEnd();
        }
    @@ -18125,17 +18521,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri
              if (ftype == ::apache::thrift::protocol::T_MAP) {
                {
                  this->part_vals.clear();
    - uint32_t _size1127;
    - ::apache::thrift::protocol::TType _ktype1128;
    - ::apache::thrift::protocol::TType _vtype1129;
    - xfer += iprot->readMapBegin(_ktype1128, _vtype1129, _size1127);
    - uint32_t _i1131;
    - for (_i1131 = 0; _i1131 < _size1127; ++_i1131)
    + uint32_t _size1147;
    + ::apache::thrift::protocol::TType _ktype1148;
    + ::apache::thrift::protocol::TType _vtype1149;
    + xfer += iprot->readMapBegin(_ktype1148, _vtype1149, _size1147);
    + uint32_t _i1151;
    + for (_i1151 = 0; _i1151 < _size1147; ++_i1151)
                  {
    - std::string _key1132;
    - xfer += iprot->readString(_key1132);
    - std::string& _val1133 = this->part_vals[_key1132];
    - xfer += iprot->readString(_val1133);
    + std::string _key1152;
    + xfer += iprot->readString(_key1152);
    + std::string& _val1153 = this->part_vals[_key1152];
    + xfer += iprot->readString(_val1153);
                  }
                  xfer += iprot->readMapEnd();
                }
    @@ -18146,9 +18542,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri
              break;
            case 4:
              if (ftype == ::apache::thrift::protocol::T_I32) {
    - int32_t ecast1134;
    - xfer += iprot->readI32(ecast1134);
    - this->eventType = (PartitionEventType::type)ecast1134;
    + int32_t ecast1154;
    + xfer += iprot->readI32(ecast1154);
    + this->eventType = (PartitionEventType::type)ecast1154;
                this->__isset.eventType = true;
              } else {
                xfer += iprot->skip(ftype);
    @@ -18182,11 +18578,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3);
        {
          xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
    - std::map<std::string, std::string> ::const_iterator _iter1135;
    - for (_iter1135 = this->part_vals.begin(); _iter1135 != this->part_vals.end(); ++_iter1135)
    + std::map<std::string, std::string> ::const_iterator _iter1155;
    + for (_iter1155 = this->part_vals.begin(); _iter1155 != this->part_vals.end(); ++_iter1155)
          {
    - xfer += oprot->writeString(_iter1135->first);
    - xfer += oprot->writeString(_iter1135->second);
    + xfer += oprot->writeString(_iter1155->first);
    + xfer += oprot->writeString(_iter1155->second);
          }
          xfer += oprot->writeMapEnd();
        }
    @@ -18222,11 +18618,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th
        xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3);
        {
          xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
    - std::map<std::string, std::string> ::const_iterator _iter1136;
    - for (_iter1136 = (*(this->part_vals)).begin(); _iter1136 != (*(this->part_vals)).end(); ++_iter1136)
    + std::map<std::string, std::string> ::const_iterator _iter1156;
    + for (_iter1156 = (*(this->part_vals)).begin(); _iter1156 != (*(this->part_vals)).end(); ++_iter1156)
          {
    - xfer += oprot->writeString(_iter1136->first);
    - xfer += oprot->writeString(_iter1136->second);
    + xfer += oprot->writeString(_iter1156->first);
    + xfer += oprot->writeString(_iter1156->second);
          }
          xfer += oprot->writeMapEnd();
        }
    @@ -19662,14 +20058,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->success.clear();
    - uint32_t _size1137;
    - ::apache::thrift::protocol::TType _etype1140;
    - xfer += iprot->readListBegin(_etype1140, _size1137);
    - this->success.resize(_size1137);
    - uint32_t _i1141;
    - for (_i1141 = 0; _i1141 < _size1137; ++_i1141)
    + uint32_t _size1157;
    + ::apache::thrift::protocol::TType _etype1160;
    + xfer += iprot->readListBegin(_etype1160, _size1157);
    + this->success.resize(_size1157);
    + uint32_t _i1161;
    + for (_i1161 = 0; _i1161 < _size1157; ++_i1161)
                  {
    - xfer += this->success[_i1141].read(iprot);
    + xfer += this->success[_i1161].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -19716,10 +20112,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
          {
            xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
    - std::vector<Index> ::const_iterator _iter1142;
    - for (_iter1142 = this->success.begin(); _iter1142 != this->success.end(); ++_iter1142)
    + std::vector<Index> ::const_iterator _iter1162;
    + for (_iter1162 = this->success.begin(); _iter1162 != this->success.end(); ++_iter1162)
            {
    - xfer += (*_iter1142).write(oprot);
    + xfer += (*_iter1162).write(oprot);
            }
            xfer += oprot->writeListEnd();
          }
    @@ -19768,14 +20164,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  (*(this->success)).clear();
    - uint32_t _size1143;
    - ::apache::thrift::protocol::TType _etype1146;
    - xfer += iprot->readListBegin(_etype1146, _size1143);
    - (*(this->success)).resize(_size1143);
    - uint32_t _i1147;
    - for (_i1147 = 0; _i1147 < _size1143; ++_i1147)
    + uint32_t _size1163;
    + ::apache::thrift::protocol::TType _etype1166;
    + xfer += iprot->readListBegin(_etype1166, _size1163);
    + (*(this->success)).resize(_size1163);
    + uint32_t _i1167;
    + for (_i1167 = 0; _i1167 < _size1163; ++_i1167)
                  {
    - xfer += (*(this->success))[_i1147].read(iprot);
    + xfer += (*(this->success))[_i1167].read(iprot);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -19953,14 +20349,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  this->success.clear();
    - uint32_t _size1148;
    - ::apache::thrift::protocol::TType _etype1151;
    - xfer += iprot->readListBegin(_etype1151, _size1148);
    - this->success.resize(_size1148);
    - uint32_t _i1152;
    - for (_i1152 = 0; _i1152 < _size1148; ++_i1152)
    + uint32_t _size1168;
    + ::apache::thrift::protocol::TType _etype1171;
    + xfer += iprot->readListBegin(_etype1171, _size1168);
    + this->success.resize(_size1168);
    + uint32_t _i1172;
    + for (_i1172 = 0; _i1172 < _size1168; ++_i1172)
                  {
    - xfer += iprot->readString(this->success[_i1152]);
    + xfer += iprot->readString(this->success[_i1172]);
                  }
                  xfer += iprot->readListEnd();
                }
    @@ -19999,10 +20395,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro
          xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
          {
            xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
    - std::vector<std::string> ::const_iterator _iter1153;
    - for (_iter1153 = this->success.begin(); _iter1153 != this->success.end(); ++_iter1153)
    + std::vector<std::string> ::const_iterator _iter1173;
    + for (_iter1173 = this->success.begin(); _iter1173 != this->success.end(); ++_iter1173)
            {
    - xfer += oprot->writeString((*_iter1153));
    + xfer += oprot->writeString((*_iter1173));
            }
            xfer += oprot->writeListEnd();
          }
    @@ -20047,14 +20443,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro
              if (ftype == ::apache::thrift::protocol::T_LIST) {
                {
                  (*(this->success)).clear();
    - uint32_t _size1154;
    - ::apache::thrift::protocol::TType _etyp

    <TRUNCATED>
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-12292 : revert the if removal from HIVE-12237 (Sergey Shelukhin, reviewed by Ashutosh Chauhan)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6fda3b55
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6fda3b55
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6fda3b55

    Branch: refs/heads/master-fixed
    Commit: 6fda3b55e9ae680f47c55395f90be762285f2760
    Parents: c9246f4
    Author: Sergey Shelukhin <sershe@apache.org>
    Authored: Mon Nov 2 13:03:01 2015 -0800
    Committer: Sergey Shelukhin <sershe@apache.org>
    Committed: Mon Nov 2 13:03:01 2015 -0800

    ----------------------------------------------------------------------
      .../org/apache/hadoop/hive/cli/CliDriver.java | 4 +-
      .../hadoop/hive/llap/cache/BuddyAllocator.java | 9 ++--
      .../hive/llap/cache/LowLevelCacheImpl.java | 5 +-
      .../llap/cache/LowLevelCacheMemoryManager.java | 5 +-
      .../llap/cache/LowLevelFifoCachePolicy.java | 4 +-
      .../llap/cache/LowLevelLrfuCachePolicy.java | 7 +--
      .../hive/llap/io/api/impl/LlapIoImpl.java | 17 +++++--
      .../llap/io/decode/OrcColumnVectorProducer.java | 9 ++--
      .../llap/io/encoded/OrcEncodedDataReader.java | 7 +--
      .../org/apache/hadoop/hive/llap/LogLevels.java | 53 ++++++++++++++++++++
      10 files changed, 95 insertions(+), 25 deletions(-)
    ----------------------------------------------------------------------
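
    The hunks below all re-wrap LLAP and CLI log statements in explicit level checks and add a small LogLevels helper (new file at the end of the patch) whose cached flags back those checks. Even with SLF4J's parameterized form, argument expressions are evaluated before the call, so the guard is what keeps work such as building the classpath string or concatenating allocator sizes off the hot path when the level is disabled. A minimal sketch of the restored pattern (class, method and message here are illustrative; the concrete hunks follow below):

      import org.slf4j.Logger;
      import org.slf4j.LoggerFactory;

      public final class GuardedLogSketch {
        private static final Logger LOG = LoggerFactory.getLogger(GuardedLogSketch.class);

        static void logClasspath() {
          // The guard ensures System.getProperty() and the string
          // concatenation run only when DEBUG is actually enabled.
          if (LOG.isDebugEnabled()) {
            LOG.debug("inited with classpath " + System.getProperty("java.class.path"));
          }
        }
      }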


    http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
    ----------------------------------------------------------------------
    diff --git a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
    index 82d064d..b359850 100644
    --- a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
    +++ b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
    @@ -107,7 +107,9 @@ public class CliDriver {
          SessionState ss = SessionState.get();
          conf = (ss != null) ? ss.getConf() : new Configuration();
          Logger LOG = LoggerFactory.getLogger("CliDriver");
    - LOG.debug("CliDriver inited with classpath {}", System.getProperty("java.class.path"));
    + if (LOG.isDebugEnabled()) {
    + LOG.debug("CliDriver inited with classpath {}", System.getProperty("java.class.path"));
    + }
          console = new LogHelper(LOG);
          originalThreadName = Thread.currentThread().getName();
        }

    http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
    index f69ac5b..2aca68d 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
    @@ -48,10 +48,11 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
          maxAllocation = HiveConf.getIntVar(conf, ConfVars.LLAP_ORC_CACHE_MAX_ALLOC);
          arenaSize = HiveConf.getIntVar(conf, ConfVars.LLAP_ORC_CACHE_ARENA_SIZE);
          long maxSizeVal = HiveConf.getLongVar(conf, ConfVars.LLAP_ORC_CACHE_MAX_SIZE);
    - LlapIoImpl.LOG.info("Buddy allocator with {}", (isDirect ? "direct" : "byte")
    - , " buffers; allocation sizes {} ", minAllocation, " - {}", maxAllocation
    - , ", arena size {}", arenaSize, ". total size {}", maxSizeVal);
    -
    + if (LlapIoImpl.LOGL.isInfoEnabled()) {
    + LlapIoImpl.LOG.info("Buddy allocator with " + (isDirect ? "direct" : "byte")
    + + " buffers; allocation sizes " + minAllocation + " - " + maxAllocation
    + + ", arena size " + arenaSize + ". total size " + maxSizeVal);
    + }

          if (minAllocation < 8) {
            throw new AssertionError("Min allocation must be at least 8: " + minAllocation);
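
    In the removed BuddyAllocator call above, only the first argument has a matching "{}" placeholder; SLF4J substitutes placeholders in order and ignores surplus arguments, so the emitted message would have been just "Buddy allocator with direct" (or "byte") and none of the sizes reached the log. The removed calls in LowLevelCacheImpl and LowLevelLrfuCachePolicy below have the same shape. A correctly parameterized form would keep the whole message in one format string; a sketch only, not part of the patch, which instead restores guards plus concatenation:

      // Hypothetical single-format-string version of the removed call;
      // variable names are taken from the BuddyAllocator hunk above.
      LlapIoImpl.LOG.info(
          "Buddy allocator with {} buffers; allocation sizes {} - {}, arena size {}. total size {}",
          isDirect ? "direct" : "byte", minAllocation, maxAllocation, arenaSize, maxSizeVal);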

    http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
    index e7b8f1a..c2a130a 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
    @@ -58,8 +58,9 @@ public class LowLevelCacheImpl implements LowLevelCache, LlapOomDebugDump {
        @VisibleForTesting
        LowLevelCacheImpl(LlapDaemonCacheMetrics metrics, LowLevelCachePolicy cachePolicy,
            EvictionAwareAllocator allocator, boolean doAssumeGranularBlocks, long cleanupInterval) {
    - LlapIoImpl.LOG.info("Low level cache; cleanup interval {}", cleanupInterval, "sec");
    -
    + if (LlapIoImpl.LOGL.isInfoEnabled()) {
    + LlapIoImpl.LOG.info("Low level cache; cleanup interval " + cleanupInterval + "sec");
    + }
          this.cachePolicy = cachePolicy;
          this.allocator = allocator;
          this.cleanupInterval = cleanupInterval;

    http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
    index 8a39e35..4a256ee 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
    @@ -44,8 +44,9 @@ public class LowLevelCacheMemoryManager implements MemoryManager {
          this.usedMemory = new AtomicLong(0);
          this.metrics = metrics;
          metrics.incrCacheCapacityTotal(maxSize);
    - LlapIoImpl.LOG.info("Cache memory manager initialized with max size {}", maxSize);
    -
    + if (LlapIoImpl.LOGL.isInfoEnabled()) {
    + LlapIoImpl.LOG.info("Cache memory manager initialized with max size " + maxSize);
    + }
        }

        @Override

    http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
    index 0838682..1430eae 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelFifoCachePolicy.java
    @@ -35,7 +35,9 @@ public class LowLevelFifoCachePolicy implements LowLevelCachePolicy {
        private LlapOomDebugDump parentDebugDump;

        public LowLevelFifoCachePolicy(Configuration conf) {
    - LlapIoImpl.LOG.info("FIFO cache policy");
    + if (LlapIoImpl.LOGL.isInfoEnabled()) {
    + LlapIoImpl.LOG.info("FIFO cache policy");
    + }
          buffers = new LinkedList<LlapCacheableBuffer>();
        }


    http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
    index 49e1b59..76e7605 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelLrfuCachePolicy.java
    @@ -78,9 +78,10 @@ public class LowLevelLrfuCachePolicy implements LowLevelCachePolicy {
            int lrfuThreshold = (int)((Math.log(1 - Math.pow(0.5, lambda)) / Math.log(0.5)) / lambda);
            maxHeapSize = Math.min(lrfuThreshold, maxBuffers);
          }
    - LlapIoImpl.LOG.info("LRFU cache policy with min buffer size {}", minBufferSize
    - , " and lambda {}", lambda, " (heap size {} ", maxHeapSize + ")");
    -
    + if (LlapIoImpl.LOGL.isInfoEnabled()) {
    + LlapIoImpl.LOG.info("LRFU cache policy with min buffer size " + minBufferSize
    + + " and lambda " + lambda + " (heap size " + maxHeapSize + ")");
    + }

          heap = new LlapCacheableBuffer[maxHeapSize];
          listHead = listTail = null;

    http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
    index 83a88f5..b38f472 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
    @@ -18,6 +18,8 @@

      package org.apache.hadoop.hive.llap.io.api.impl;

    +import org.apache.hadoop.hive.llap.LogLevels;
    +
      import java.io.IOException;
      import java.util.concurrent.Executors;

    @@ -56,19 +58,21 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;

      public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
        public static final Logger LOG = LoggerFactory.getLogger(LlapIoImpl.class);
    + public static final LogLevels LOGL = new LogLevels(LOG);

        private final ColumnVectorProducer cvp;
        private final ListeningExecutorService executor;
    - private final LlapDaemonCacheMetrics cacheMetrics;
    - private final LlapDaemonQueueMetrics queueMetrics;
    + private LlapDaemonCacheMetrics cacheMetrics;
    + private LlapDaemonQueueMetrics queueMetrics;
        private ObjectName buddyAllocatorMXBean;
        private EvictionAwareAllocator allocator;

        private LlapIoImpl(Configuration conf) throws IOException {
          boolean useLowLevelCache = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_LOW_LEVEL_CACHE);
          // High-level cache not supported yet.
    - LOG.info("Initializing LLAP IO {}", useLowLevelCache ? " with low level cache" : "");
    -
    + if (LOGL.isInfoEnabled()) {
    + LOG.info("Initializing LLAP IO" + (useLowLevelCache ? " with low level cache" : ""));
    + }

          String displayName = "LlapDaemonCacheMetrics-" + MetricsUtils.getHostName();
          String sessionId = conf.get("llap.daemon.metrics.sessionid");
    @@ -111,7 +115,10 @@ public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
          // TODO: this should depends on input format and be in a map, or something.
          this.cvp = new OrcColumnVectorProducer(metadataCache, orcCache, cache, conf, cacheMetrics,
              queueMetrics);
    - LOG.info("LLAP IO initialized");
    + if (LOGL.isInfoEnabled()) {
    + LOG.info("LLAP IO initialized");
    + }
    +
          registerMXBeans();
        }
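
    The LOGL field introduced here captures the logger's enabled flags once, in the LogLevels constructor added at the end of this patch, so each guard is a plain boolean read instead of a call into the logging backend; the trade-off is that a level changed at runtime after startup is not observed. A self-contained sketch of that behaviour, using a simplified stand-in for the helper (class and method names below are illustrative, not the patch's):

      import org.slf4j.Logger;
      import org.slf4j.LoggerFactory;

      final class CachedLevelsDemo {
        // Simplified stand-in for org.apache.hadoop.hive.llap.LogLevels:
        // the enabled flag is read once at construction and never refreshed.
        static final class CachedLevels {
          private final boolean isInfo;
          CachedLevels(Logger log) { this.isInfo = log.isInfoEnabled(); }
          boolean isInfoEnabled() { return isInfo; }
        }

        private static final Logger LOG = LoggerFactory.getLogger(CachedLevelsDemo.class);
        private static final CachedLevels LEVELS = new CachedLevels(LOG);

        static void logState(String state) {
          // The concatenation only happens if INFO was enabled
          // at the time LEVELS was constructed.
          if (LEVELS.isInfoEnabled()) {
            LOG.info("LLAP IO state: " + state);
          }
        }
      }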


    http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
    index 38c31d3..259c483 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
    @@ -43,15 +43,16 @@ public class OrcColumnVectorProducer implements ColumnVectorProducer {
        private final Cache<OrcCacheKey> cache;
        private final LowLevelCache lowLevelCache;
        private final Configuration conf;
    - private final boolean _skipCorrupt; // TODO: get rid of this
    - private final LlapDaemonCacheMetrics cacheMetrics;
    - private final LlapDaemonQueueMetrics queueMetrics;
    + private boolean _skipCorrupt; // TODO: get rid of this
    + private LlapDaemonCacheMetrics cacheMetrics;
    + private LlapDaemonQueueMetrics queueMetrics;

        public OrcColumnVectorProducer(OrcMetadataCache metadataCache,
            LowLevelCacheImpl lowLevelCache, Cache<OrcCacheKey> cache, Configuration conf,
            LlapDaemonCacheMetrics metrics, LlapDaemonQueueMetrics queueMetrics) {
    + if (LlapIoImpl.LOGL.isInfoEnabled()) {
            LlapIoImpl.LOG.info("Initializing ORC column vector producer");
    -
    + }

          this.metadataCache = metadataCache;
          this.lowLevelCache = lowLevelCache;

    http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
    index e625490..9bdafc9 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
    @@ -198,8 +198,9 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>

        protected Void performDataRead() throws IOException {
          long startTime = counters.startTimeCounter();
    - LlapIoImpl.LOG.info("Processing data for {}", split.getPath());
    -
    + if (LlapIoImpl.LOGL.isInfoEnabled()) {
    + LlapIoImpl.LOG.info("Processing data for " + split.getPath());
    + }
          if (processStop()) {
            recordReaderTime(startTime);
            return null;
    @@ -744,7 +745,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
          long offset = split.getStart(), maxOffset = offset + split.getLength();
          stripeIxFrom = -1;
          int stripeIxTo = -1;
    - if (LlapIoImpl.LOG.isDebugEnabled()) {
    + if (LlapIoImpl.LOGL.isDebugEnabled()) {
            String tmp = "FileSplit {" + split.getStart() + ", " + split.getLength() + "}; stripes ";
            for (StripeInformation stripe : stripes) {
              tmp += "{" + stripe.getOffset() + ", " + stripe.getLength() + "}, ";

    http://git-wip-us.apache.org/repos/asf/hive/blob/6fda3b55/ql/src/java/org/apache/hadoop/hive/llap/LogLevels.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LogLevels.java b/ql/src/java/org/apache/hadoop/hive/llap/LogLevels.java
    new file mode 100644
    index 0000000..300230f
    --- /dev/null
    +++ b/ql/src/java/org/apache/hadoop/hive/llap/LogLevels.java
    @@ -0,0 +1,53 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
     + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.hadoop.hive.llap;
    +
    +import org.slf4j.Logger;
    +
    +public class LogLevels {
    + private final boolean isT, isD, isI, isW, isE;
    +
    + public LogLevels(Logger log) {
    + isT = log.isTraceEnabled();
    + isD = log.isDebugEnabled();
    + isI = log.isInfoEnabled();
    + isW = log.isWarnEnabled();
    + isE = log.isErrorEnabled();
    + }
    +
    + public boolean isTraceEnabled() {
    + return isT;
    + }
    +
    + public boolean isDebugEnabled() {
    + return isD;
    + }
    +
    + public boolean isInfoEnabled() {
    + return isI;
    + }
    +
    + public boolean isWarnEnabled() {
    + return isW;
    + }
    +
    + public boolean isErrorEnabled() {
    + return isE;
    + }
    +}
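    The LogLevels helper above simply snapshots each SLF4J level once at construction; the LLAP classes then consult the cached booleans (LOGL.isInfoEnabled(), LOGL.isDebugEnabled()) before building log strings. A minimal usage sketch, mirroring the LlapIoImpl changes above (the class name and message here are illustrative only, not part of the patch):

        import org.slf4j.Logger;
        import org.slf4j.LoggerFactory;
        import org.apache.hadoop.hive.llap.LogLevels;

        public class CachedLevelLoggingExample {
          private static final Logger LOG = LoggerFactory.getLogger(CachedLevelLoggingExample.class);
          // Levels are sampled once, when the class is initialized, instead of on every log call.
          private static final LogLevels LOGL = new LogLevels(LOG);

          void process(String path) {
            // Guard the concatenation so the message is only built when INFO is enabled.
            if (LOGL.isInfoEnabled()) {
              LOG.info("Processing data for " + path);
            }
          }
        }

    One trade-off worth noting: because the booleans are captured in the constructor, a log level changed at runtime will not be seen by an already-constructed LogLevels instance.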
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-12266 When client exits abnormally, it doesn't release ACID locks (Wei Zheng, via Eugene Koifman)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/47617d31
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/47617d31
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/47617d31

    Branch: refs/heads/master-fixed
    Commit: 47617d31f347a0ba78ebfc903738b39dd960b19b
    Parents: d7c0485
    Author: Eugene Koifman <ekoifman@hortonworks.com>
    Authored: Tue Nov 3 09:03:54 2015 -0800
    Committer: Eugene Koifman <ekoifman@hortonworks.com>
    Committed: Tue Nov 3 09:03:54 2015 -0800

    ----------------------------------------------------------------------
      .../java/org/apache/hadoop/hive/ql/Driver.java | 43 +++++++++++++++-----
      1 file changed, 32 insertions(+), 11 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/47617d31/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    index 18052f3..93c7a54 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    @@ -121,12 +121,14 @@ import org.apache.hadoop.hive.shims.Utils;
      import org.apache.hadoop.mapred.ClusterStatus;
      import org.apache.hadoop.mapred.JobClient;
      import org.apache.hadoop.mapred.JobConf;
    +import org.apache.hive.common.util.ShutdownHookManager;

      public class Driver implements CommandProcessor {

        static final private String CLASS_NAME = Driver.class.getName();
        private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
        static final private LogHelper console = new LogHelper(LOG);
    + static final int SHUTDOWN_HOOK_PRIORITY = 0;

        private int maxRows = 100;
        ByteStream.Output bos = new ByteStream.Output();
    @@ -390,7 +392,20 @@ public class Driver implements CommandProcessor {

          try {
            // Initialize the transaction manager. This must be done before analyze is called.
    - SessionState.get().initTxnMgr(conf);
    + final HiveTxnManager txnManager = SessionState.get().initTxnMgr(conf);
    + // In case the user hits Ctrl-C twice to kill the Hive CLI JVM, we want to release locks
    + ShutdownHookManager.addShutdownHook(
    + new Runnable() {
    + @Override
    + public void run() {
    + try {
    + releaseLocksAndCommitOrRollback(false, txnManager);
    + } catch (LockException e) {
    + LOG.warn("Exception when releasing locks in ShutdownHook for Driver: " +
    + e.getMessage());
    + }
    + }
    + }, SHUTDOWN_HOOK_PRIORITY);

            command = new VariableSubstitution(new HiveVariableSource() {
              @Override
    @@ -537,7 +552,7 @@ public class Driver implements CommandProcessor {
         *
         * @param sem semantic analyzer for analyzed query
         * @param plan query plan
    - * @param astStringTree AST tree dump
    + * @param astTree AST tree dump
         * @throws java.io.IOException
         */
        private String getExplainOutput(BaseSemanticAnalyzer sem, QueryPlan plan,
    @@ -1049,15 +1064,21 @@ public class Driver implements CommandProcessor {
        /**
         * @param commit if there is an open transaction and if true, commit,
         * if false rollback. If there is no open transaction this parameter is ignored.
    + * @param txnManager an optional existing transaction manager retrieved earlier from the session
         *
         **/
    - private void releaseLocksAndCommitOrRollback(boolean commit)
    + private void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnManager)
            throws LockException {
          PerfLogger perfLogger = SessionState.getPerfLogger();
          perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RELEASE_LOCKS);

    - SessionState ss = SessionState.get();
    - HiveTxnManager txnMgr = ss.getTxnMgr();
    + HiveTxnManager txnMgr;
    + if (txnManager == null) {
    + SessionState ss = SessionState.get();
    + txnMgr = ss.getTxnMgr();
    + } else {
    + txnMgr = txnManager;
    + }
          // If we've opened a transaction we need to commit or rollback rather than explicitly
          // releasing the locks.
          if (txnMgr.isTxnOpen()) {
    @@ -1206,7 +1227,7 @@ public class Driver implements CommandProcessor {
          }
          if (ret != 0) {
            try {
    - releaseLocksAndCommitOrRollback(false);
    + releaseLocksAndCommitOrRollback(false, null);
            } catch (LockException e) {
              LOG.warn("Exception in releasing locks. "
                  + org.apache.hadoop.util.StringUtils.stringifyException(e));
    @@ -1287,7 +1308,7 @@ public class Driver implements CommandProcessor {
              if(plan.getAutoCommitValue() && !txnManager.getAutoCommit()) {
                /*here, if there is an open txn, we want to commit it; this behavior matches
                * https://docs.oracle.com/javase/6/docs/api/java/sql/Connection.html#setAutoCommit(boolean)*/
    - releaseLocksAndCommitOrRollback(true);
    + releaseLocksAndCommitOrRollback(true, null);
                txnManager.setAutoCommit(true);
              }
              else if(!plan.getAutoCommitValue() && txnManager.getAutoCommit()) {
    @@ -1315,10 +1336,10 @@ public class Driver implements CommandProcessor {
          //if needRequireLock is false, the release here will do nothing because there is no lock
          try {
            if(txnManager.getAutoCommit() || plan.getOperation() == HiveOperation.COMMIT) {
    - releaseLocksAndCommitOrRollback(true);
    + releaseLocksAndCommitOrRollback(true, null);
            }
            else if(plan.getOperation() == HiveOperation.ROLLBACK) {
    - releaseLocksAndCommitOrRollback(false);
    + releaseLocksAndCommitOrRollback(false, null);
            }
            else {
              //txn (if there is one started) is not finished
    @@ -1349,7 +1370,7 @@ public class Driver implements CommandProcessor {
        private CommandProcessorResponse rollback(CommandProcessorResponse cpr) {
          //console.printError(cpr.toString());
          try {
    - releaseLocksAndCommitOrRollback(false);
    + releaseLocksAndCommitOrRollback(false, null);
          }
          catch (LockException e) {
            LOG.error("rollback() FAILED: " + cpr);//make sure not to loose
    @@ -1897,7 +1918,7 @@ public class Driver implements CommandProcessor {
          destroyed = true;
          if (!hiveLocks.isEmpty()) {
            try {
    - releaseLocksAndCommitOrRollback(false);
    + releaseLocksAndCommitOrRollback(false, null);
            } catch (LockException e) {
              LOG.warn("Exception when releasing locking in destroy: " +
                  e.getMessage());
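    The core of this change is registering a JVM shutdown hook right after the transaction manager is initialized, so that a CLI session killed with Ctrl-C still rolls back its open transaction and releases its ACID locks. Below is a minimal sketch of the same idea using only the JDK API; the real patch goes through org.apache.hive.common.util.ShutdownHookManager with an explicit priority, and LockManager/releaseAll here are hypothetical stand-ins, not Hive types:

        public class LockReleaseHookExample {
          interface LockManager { void releaseAll() throws Exception; }

          static void installHook(final LockManager lockManager) {
            Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
              @Override
              public void run() {
                try {
                  // Roll back and release whatever the session still holds.
                  lockManager.releaseAll();
                } catch (Exception e) {
                  System.err.println("Exception when releasing locks in shutdown hook: "
                      + e.getMessage());
                }
              }
            }));
          }
        }

    Passing the HiveTxnManager into releaseLocksAndCommitOrRollback (rather than re-reading it from SessionState) matters here, presumably because SessionState is thread-local and would not be visible from the shutdown-hook thread.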
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-12235 : Improve beeline logging for dynamic service discovery (Szehon, reviewed by Vaibhav Gumashta)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/492a10f1
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/492a10f1
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/492a10f1

    Branch: refs/heads/master-fixed
    Commit: 492a10f101471226004b6f571d7f8c8a79103664
    Parents: 902a548
    Author: Szehon Ho <szehon@cloudera.com>
    Authored: Mon Nov 2 16:38:03 2015 -0800
    Committer: Szehon Ho <szehon@cloudera.com>
    Committed: Mon Nov 2 16:38:03 2015 -0800

    ----------------------------------------------------------------------
      beeline/src/main/resources/beeline-log4j2.xml | 4 +++-
      .../org/apache/hive/jdbc/HiveConnection.java | 21 ++++++++++++++------
      2 files changed, 18 insertions(+), 7 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/492a10f1/beeline/src/main/resources/beeline-log4j2.xml
    ----------------------------------------------------------------------
    diff --git a/beeline/src/main/resources/beeline-log4j2.xml b/beeline/src/main/resources/beeline-log4j2.xml
    index a64f55e..55ec0f5 100644
    --- a/beeline/src/main/resources/beeline-log4j2.xml
    +++ b/beeline/src/main/resources/beeline-log4j2.xml
    @@ -34,6 +34,8 @@
          <Root level="${sys:hive.log.level}">
            <AppenderRef ref="${sys:hive.root.logger}"/>
          </Root>
    + <!-- HiveConnection logs useful info for dynamic service discovery -->
    + <logger name="org.apache.hive.jdbc.HiveConnection" level="INFO"/>
        </Loggers>

    -</Configuration>
    +</Configuration>
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/492a10f1/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
    ----------------------------------------------------------------------
    diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
    index e38c585..f79d73d 100644
    --- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
    +++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
    @@ -204,16 +204,14 @@ public class HiveConnection implements java.sql.Connection {
                      .get(JdbcConnectionParams.AUTH_KERBEROS_AUTH_TYPE));
              transport = isHttpTransportMode() ? createHttpTransport() : createBinaryTransport();
              if (!transport.isOpen()) {
    - LOG.info("Will try to open client transport with JDBC Uri: " + jdbcUriString);
                transport.open();
    + logZkDiscoveryMessage("Connected to " + connParams.getHost() + ":" + connParams.getPort());
              }
              break;
            } catch (TTransportException e) {
    - LOG.info("Could not open client transport with JDBC Uri: " + jdbcUriString);
              // We'll retry till we exhaust all HiveServer2 nodes from ZooKeeper
    - if ((sessConfMap.get(JdbcConnectionParams.SERVICE_DISCOVERY_MODE) != null)
    - && (JdbcConnectionParams.SERVICE_DISCOVERY_MODE_ZOOKEEPER.equalsIgnoreCase(sessConfMap
    - .get(JdbcConnectionParams.SERVICE_DISCOVERY_MODE)))) {
    + if (isZkDynamicDiscoveryMode()) {
    + LOG.info("Failed to connect to " + connParams.getHost() + ":" + connParams.getPort());
                try {
                  // Update jdbcUriString, host & port variables in connParams
                  // Throw an exception if all HiveServer2 nodes have been exhausted,
    @@ -228,7 +226,6 @@ public class HiveConnection implements java.sql.Connection {
                jdbcUriString = connParams.getJdbcUriString();
                host = connParams.getHost();
                port = connParams.getPort();
    - LOG.info("Will retry opening client transport");
              } else {
                LOG.info("Transport Used for JDBC connection: " +
                  sessConfMap.get(JdbcConnectionParams.TRANSPORT_MODE));
    @@ -650,6 +647,18 @@ public class HiveConnection implements java.sql.Connection {
          return false;
        }

    + private boolean isZkDynamicDiscoveryMode() {
    + return (sessConfMap.get(JdbcConnectionParams.SERVICE_DISCOVERY_MODE) != null)
    + && (JdbcConnectionParams.SERVICE_DISCOVERY_MODE_ZOOKEEPER.equalsIgnoreCase(sessConfMap
    + .get(JdbcConnectionParams.SERVICE_DISCOVERY_MODE)));
    + }
    +
    + private void logZkDiscoveryMessage(String message) {
    + if (isZkDynamicDiscoveryMode()) {
    + LOG.info(message);
    + }
    + }
    +
        /**
         * Lookup varName in sessConfMap, if its null or empty return the default
         * value varDefault
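    With this change HiveConnection only emits the connect/retry messages when the client is actually using ZooKeeper-based dynamic service discovery, which keeps plain host:port connections quiet. For context, a hedged client-side sketch of such a connection; the quorum hosts and namespace below are placeholders rather than values from the patch, and the URL follows the usual serviceDiscoveryMode=zooKeeper form:

        import java.sql.Connection;
        import java.sql.DriverManager;

        public class ZkDiscoveryConnectExample {
          public static void main(String[] args) throws Exception {
            // ZooKeeper quorum plus the HiveServer2 namespace; the driver picks a live
            // HiveServer2 instance registered under that namespace.
            String url = "jdbc:hive2://zk1:2181,zk2:2181,zk3:2181/default;"
                + "serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2";
            // With HIVE-12235, a successful open logs "Connected to host:port" and a
            // failed attempt logs "Failed to connect to host:port" before the next
            // registered HiveServer2 instance is tried.
            try (Connection conn = DriverManager.getConnection(url, "hiveuser", "")) {
              System.out.println("Connected: " + !conn.isClosed());
            }
          }
        }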
  • Jxiang at Nov 6, 2015 at 5:32 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/pcs.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/pcs.q.out b/ql/src/test/results/clientpositive/pcs.q.out
    new file mode 100644
    index 0000000..5cf0dff
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/pcs.q.out
    @@ -0,0 +1,2249 @@
    +PREHOOK: query: drop table pcs_t1
    +PREHOOK: type: DROPTABLE
    +POSTHOOK: query: drop table pcs_t1
    +POSTHOOK: type: DROPTABLE
    +PREHOOK: query: drop table pcs_t2
    +PREHOOK: type: DROPTABLE
    +POSTHOOK: query: drop table pcs_t2
    +POSTHOOK: type: DROPTABLE
    +PREHOOK: query: create table pcs_t1 (key int, value string) partitioned by (ds string)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@pcs_t1
    +POSTHOOK: query: create table pcs_t1 (key int, value string) partitioned by (ds string)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@pcs_t1
    +PREHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-08') select * from src where key < 20 order by key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Output: default@pcs_t1@ds=2000-04-08
    +POSTHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-08') select * from src where key < 20 order by key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: default@pcs_t1@ds=2000-04-08
    +POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-08).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
    +POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-08).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
    +PREHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-09') select * from src where key < 20 order by key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Output: default@pcs_t1@ds=2000-04-09
    +POSTHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-09') select * from src where key < 20 order by key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: default@pcs_t1@ds=2000-04-09
    +POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-09).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
    +POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-09).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
    +PREHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-10') select * from src where key < 20 order by key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Output: default@pcs_t1@ds=2000-04-10
    +POSTHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-10') select * from src where key < 20 order by key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: default@pcs_t1@ds=2000-04-10
    +POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-10).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
    +POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
    +PREHOOK: query: analyze table pcs_t1 partition(ds) compute statistics
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@pcs_t1
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-08
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-09
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-10
    +PREHOOK: Output: default@pcs_t1
    +PREHOOK: Output: default@pcs_t1@ds=2000-04-08
    +PREHOOK: Output: default@pcs_t1@ds=2000-04-09
    +PREHOOK: Output: default@pcs_t1@ds=2000-04-10
    +POSTHOOK: query: analyze table pcs_t1 partition(ds) compute statistics
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@pcs_t1
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-10
    +POSTHOOK: Output: default@pcs_t1
    +POSTHOOK: Output: default@pcs_t1@ds=2000-04-08
    +POSTHOOK: Output: default@pcs_t1@ds=2000-04-09
    +POSTHOOK: Output: default@pcs_t1@ds=2000-04-10
    +PREHOOK: query: analyze table pcs_t1 partition(ds) compute statistics for columns
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@pcs_t1
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-08
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-09
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-10
    +#### A masked pattern was here ####
    +POSTHOOK: query: analyze table pcs_t1 partition(ds) compute statistics for columns
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@pcs_t1
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-10
    +#### A masked pattern was here ####
    +PREHOOK: query: explain extended select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain extended select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_TABREF
    + TOK_TABNAME
    + pcs_t1
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + TOK_TABLE_OR_COL
    + key
    + TOK_SELEXPR
    + TOK_TABLE_OR_COL
    + value
    + TOK_SELEXPR
    + TOK_TABLE_OR_COL
    + ds
    + TOK_WHERE
    + or
    + and
    + =
    + TOK_TABLE_OR_COL
    + ds
    + '2000-04-08'
    + =
    + TOK_TABLE_OR_COL
    + key
    + 1
    + and
    + =
    + TOK_TABLE_OR_COL
    + ds
    + '2000-04-09'
    + =
    + TOK_TABLE_OR_COL
    + key
    + 2
    + TOK_ORDERBY
    + TOK_TABSORTCOLNAMEASC
    + TOK_TABLE_OR_COL
    + key
    + TOK_TABSORTCOLNAMEASC
    + TOK_TABLE_OR_COL
    + value
    + TOK_TABSORTCOLNAMEASC
    + TOK_TABLE_OR_COL
    + ds
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: pcs_t1
    + Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: (struct(key,ds)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09')) (type: boolean)
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: key (type: int), value (type: string), ds (type: string)
    + outputColumnNames: _col0, _col1, _col2
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string)
    + sort order: +++
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + tag: -1
    + auto parallelism: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: ds=2000-04-08
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-08
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    +#### A masked pattern was here ####
    + Partition
    + base file name: ds=2000-04-09
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-09
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    + Truncated Path -> Alias:
    + /pcs_t1/ds=2000-04-08 [pcs_t1]
    + /pcs_t1/ds=2000-04-09 [pcs_t1]
    + Needs Tagging: false
    + Reduce Operator Tree:
    + Select Operator
    + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string)
    + outputColumnNames: _col0, _col1, _col2
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + columns _col0,_col1,_col2
    + columns.types int:string:string
    + escape.delim \
    + hive.serialization.extend.additional.nesting.levels true
    + serialization.escape.crlf true
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@pcs_t1
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-08
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-09
    +#### A masked pattern was here ####
    +POSTHOOK: query: select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@pcs_t1
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
    +#### A masked pattern was here ####
    +2 val_2 2000-04-09
    +PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_TABREF
    + TOK_TABNAME
    + pcs_t1
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + TOK_TABLE_OR_COL
    + ds
    + TOK_WHERE
    + TOK_FUNCTION
    + in
    + TOK_FUNCTION
    + struct
    + TOK_TABLE_OR_COL
    + ds
    + TOK_TABLE_OR_COL
    + key
    + TOK_FUNCTION
    + struct
    + '2000-04-08'
    + 1
    + TOK_FUNCTION
    + struct
    + '2000-04-09'
    + 2
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: pcs_t1
    + Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: (struct(ds,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean)
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: ds (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + columns _col0
    + columns.types string
    + escape.delim \
    + hive.serialization.extend.additional.nesting.levels true
    + serialization.escape.crlf true
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: ds=2000-04-08
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-08
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    +#### A masked pattern was here ####
    + Partition
    + base file name: ds=2000-04-09
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-09
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    + Truncated Path -> Alias:
    + /pcs_t1/ds=2000-04-08 [pcs_t1]
    + /pcs_t1/ds=2000-04-09 [pcs_t1]
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@pcs_t1
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-08
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-09
    +#### A masked pattern was here ####
    +POSTHOOK: query: select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@pcs_t1
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
    +#### A masked pattern was here ####
    +2000-04-09
    +PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4))
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4))
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_TABREF
    + TOK_TABNAME
    + pcs_t1
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + TOK_TABLE_OR_COL
    + ds
    + TOK_WHERE
    + TOK_FUNCTION
    + in
    + TOK_FUNCTION
    + struct
    + TOK_TABLE_OR_COL
    + ds
    + +
    + TOK_TABLE_OR_COL
    + key
    + 2
    + TOK_FUNCTION
    + struct
    + '2000-04-08'
    + 3
    + TOK_FUNCTION
    + struct
    + '2000-04-09'
    + 4
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: pcs_t1
    + Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: (struct(ds,(key + 2))) IN (const struct('2000-04-08',3), const struct('2000-04-09',4)) (type: boolean)
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: ds (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + columns _col0
    + columns.types string
    + escape.delim \
    + hive.serialization.extend.additional.nesting.levels true
    + serialization.escape.crlf true
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: ds=2000-04-08
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-08
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    +#### A masked pattern was here ####
    + Partition
    + base file name: ds=2000-04-09
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-09
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    + Truncated Path -> Alias:
    + /pcs_t1/ds=2000-04-08 [pcs_t1]
    + /pcs_t1/ds=2000-04-09 [pcs_t1]
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4))
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@pcs_t1
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-08
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-09
    +#### A masked pattern was here ####
    +POSTHOOK: query: select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4))
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@pcs_t1
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
    +#### A masked pattern was here ####
    +2000-04-09
    +PREHOOK: query: explain extended select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'))
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain extended select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'))
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_JOIN
    + TOK_TABREF
    + TOK_TABNAME
    + pcs_t1
    + a
    + TOK_TABREF
    + TOK_TABNAME
    + pcs_t1
    + b
    + =
    + .
    + TOK_TABLE_OR_COL
    + a
    + ds
    + .
    + TOK_TABLE_OR_COL
    + b
    + ds
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_HINTLIST
    + TOK_HINT
    + TOK_MAPJOIN
    + TOK_HINTARGLIST
    + pcs_t1
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + a
    + ds
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + TOK_WHERE
    + TOK_FUNCTION
    + in
    + TOK_FUNCTION
    + struct
    + .
    + TOK_TABLE_OR_COL
    + a
    + ds
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + .
    + TOK_TABLE_OR_COL
    + b
    + ds
    + TOK_FUNCTION
    + struct
    + '2000-04-08'
    + 1
    + '2000-04-09'
    + TOK_FUNCTION
    + struct
    + '2000-04-09'
    + 2
    + '2000-04-08'
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Reduce Output Operator
    + key expressions: ds (type: string)
    + sort order: +
    + Map-reduce partition columns: ds (type: string)
    + Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
    + tag: 0
    + value expressions: key (type: int)
    + auto parallelism: false
    + TableScan
    + alias: b
    + Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Reduce Output Operator
    + key expressions: ds (type: string)
    + sort order: +
    + Map-reduce partition columns: ds (type: string)
    + Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
    + tag: 1
    + value expressions: key (type: int)
    + auto parallelism: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: ds=2000-04-08
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-08
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    +#### A masked pattern was here ####
    + Partition
    + base file name: ds=2000-04-09
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-09
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    + Truncated Path -> Alias:
    + /pcs_t1/ds=2000-04-08 [a, b]
    + /pcs_t1/ds=2000-04-09 [a, b]
    + Needs Tagging: true
    + Reduce Operator Tree:
    + Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 ds (type: string)
    + 1 ds (type: string)
    + outputColumnNames: _col0, _col2, _col6, _col8
    + Statistics: Num rows: 44 Data size: 352 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + isSamplingPred: false
    + predicate: (struct(_col2,_col0,_col8)) IN (const struct('2000-04-08',1,'2000-04-09'), const struct('2000-04-09',2,'2000-04-08')) (type: boolean)
    + Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: _col2 (type: string), _col6 (type: int)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + columns _col0,_col1
    + columns.types string:int
    + escape.delim \
    + hive.serialization.extend.additional.nesting.levels true
    + serialization.escape.crlf true
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'))
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@pcs_t1
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-08
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-09
    +#### A masked pattern was here ####
    +POSTHOOK: query: select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'))
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@pcs_t1
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
    +#### A masked pattern was here ####
    +PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_TABREF
    + TOK_TABNAME
    + pcs_t1
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + TOK_TABLE_OR_COL
    + ds
    + TOK_WHERE
    + TOK_FUNCTION
    + in
    + TOK_FUNCTION
    + struct
    + TOK_TABLE_OR_COL
    + ds
    + +
    + TOK_TABLE_OR_COL
    + key
    + TOK_TABLE_OR_COL
    + key
    + TOK_FUNCTION
    + struct
    + '2000-04-08'
    + 1
    + TOK_FUNCTION
    + struct
    + '2000-04-09'
    + 2
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: pcs_t1
    + Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: (struct(ds,(key + key))) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean)
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: ds (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + columns _col0
    + columns.types string
    + escape.delim \
    + hive.serialization.extend.additional.nesting.levels true
    + serialization.escape.crlf true
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: ds=2000-04-08
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-08
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    +#### A masked pattern was here ####
    + Partition
    + base file name: ds=2000-04-09
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-09
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    + Truncated Path -> Alias:
    + /pcs_t1/ds=2000-04-08 [pcs_t1]
    + /pcs_t1/ds=2000-04-09 [pcs_t1]
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@pcs_t1
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-08
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-09
    +#### A masked pattern was here ####
    +POSTHOOK: query: select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@pcs_t1
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
    +#### A masked pattern was here ####
    +PREHOOK: query: explain select lag(key) over (partition by key) as c1
    +from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain select lag(key) over (partition by key) as c1
    +from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: pcs_t1
    + Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (struct(ds,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean)
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: key (type: int)
    + sort order: +
    + Map-reduce partition columns: key (type: int)
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + Reduce Operator Tree:
    + Select Operator
    + expressions: KEY.reducesinkkey0 (type: int)
    + outputColumnNames: _col0
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + PTF Operator
    + Function definitions:
    + Input definition
    + input alias: ptf_0
    + output shape: _col0: int
    + type: WINDOWING
    + Windowing table definition
    + input alias: ptf_1
    + name: windowingtablefunction
    + order by: _col0
    + partition by: _col0
    + raw input shape:
    + window functions:
    + window function definition
    + alias: lag_window_0
    + arguments: _col0
    + name: lag
    + window function: GenericUDAFLagEvaluator
    + window frame: PRECEDING(MAX)~FOLLOWING(MAX)
    + isPivotResult: true
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: lag_window_0 (type: int)
    + outputColumnNames: _col0
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: select lag(key) over (partition by key) as c1
    +from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@pcs_t1
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-08
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-09
    +#### A masked pattern was here ####
    +POSTHOOK: query: select lag(key) over (partition by key) as c1
    +from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@pcs_t1
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
    +#### A masked pattern was here ####
    +NULL
    +PREHOOK: query: EXPLAIN EXTENDED
    +SELECT * FROM (
    + SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    + UNION ALL
    + SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +) A
    +WHERE A.ds = '2008-04-08'
    +SORT BY A.key, A.value, A.ds
    +PREHOOK: type: QUERY
    +POSTHOOK: query: EXPLAIN EXTENDED
    +SELECT * FROM (
    + SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    + UNION ALL
    + SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +) A
    +WHERE A.ds = '2008-04-08'
    +SORT BY A.key, A.value, A.ds
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_SUBQUERY
    + TOK_UNIONALL
    + TOK_QUERY
    + TOK_FROM
    + TOK_TABREF
    + TOK_TABNAME
    + pcs_t1
    + X
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + TOK_ALLCOLREF
    + TOK_TABNAME
    + X
    + TOK_WHERE
    + TOK_FUNCTION
    + in
    + TOK_FUNCTION
    + struct
    + .
    + TOK_TABLE_OR_COL
    + X
    + ds
    + .
    + TOK_TABLE_OR_COL
    + X
    + key
    + TOK_FUNCTION
    + struct
    + '2000-04-08'
    + 1
    + TOK_FUNCTION
    + struct
    + '2000-04-09'
    + 2
    + TOK_QUERY
    + TOK_FROM
    + TOK_TABREF
    + TOK_TABNAME
    + pcs_t1
    + Y
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + TOK_ALLCOLREF
    + TOK_TABNAME
    + Y
    + TOK_WHERE
    + TOK_FUNCTION
    + in
    + TOK_FUNCTION
    + struct
    + .
    + TOK_TABLE_OR_COL
    + Y
    + ds
    + .
    + TOK_TABLE_OR_COL
    + Y
    + key
    + TOK_FUNCTION
    + struct
    + '2000-04-08'
    + 1
    + TOK_FUNCTION
    + struct
    + '2000-04-09'
    + 2
    + A
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + TOK_ALLCOLREF
    + TOK_WHERE
    + =
    + .
    + TOK_TABLE_OR_COL
    + A
    + ds
    + '2008-04-08'
    + TOK_SORTBY
    + TOK_TABSORTCOLNAMEASC
    + .
    + TOK_TABLE_OR_COL
    + A
    + key
    + TOK_TABSORTCOLNAMEASC
    + .
    + TOK_TABLE_OR_COL
    + A
    + value
    + TOK_TABSORTCOLNAMEASC
    + .
    + TOK_TABLE_OR_COL
    + A
    + ds
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: x
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: ((struct(ds,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) and (ds = '2008-04-08')) (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: int), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Union
    + Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: _col0 (type: int), _col1 (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: int), _col1 (type: string), '2008-04-08' (type: string)
    + sort order: +++
    + Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + tag: -1
    + auto parallelism: false
    + TableScan
    + alias: y
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: ((struct(ds,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) and (ds = '2008-04-08')) (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: int), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Union
    + Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: _col0 (type: int), _col1 (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: int), _col1 (type: string), '2008-04-08' (type: string)
    + sort order: +++
    + Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + tag: -1
    + auto parallelism: false
    + Needs Tagging: false
    + Reduce Operator Tree:
    + Select Operator
    + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), '2008-04-08' (type: string)
    + outputColumnNames: _col0, _col1, _col2
    + Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + columns _col0,_col1,_col2
    + columns.types int:string:string
    + escape.delim \
    + hive.serialization.extend.additional.nesting.levels true
    + serialization.escape.crlf true
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: SELECT * FROM (
    + SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    + UNION ALL
    + SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +) A
    +WHERE A.ds = '2008-04-08'
    +SORT BY A.key, A.value, A.ds
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@pcs_t1
    +#### A masked pattern was here ####
    +POSTHOOK: query: SELECT * FROM (
    + SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    + UNION ALL
    + SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +) A
    +WHERE A.ds = '2008-04-08'
    +SORT BY A.key, A.value, A.ds
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@pcs_t1
    +#### A masked pattern was here ####
    +PREHOOK: query: explain extended select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11))
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain extended select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11))
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_TABREF
    + TOK_TABNAME
    + pcs_t1
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + TOK_TABLE_OR_COL
    + ds
    + TOK_WHERE
    + TOK_FUNCTION
    + in
    + TOK_FUNCTION
    + struct
    + TOK_FUNCTION
    + when
    + =
    + TOK_TABLE_OR_COL
    + ds
    + '2000-04-08'
    + 10
    + 20
    + TOK_FUNCTION
    + struct
    + 10
    + TOK_FUNCTION
    + struct
    + 11
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-0 is a root stage
    +
    +STAGE PLANS:
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Partition Description:
    + Partition
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-08
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    + Processor Tree:
    + TableScan
    + alias: pcs_t1
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: (const struct(10)) IN (const struct(10), const struct(11)) (type: boolean)
    + Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: ds (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
    + ListSink
    +
    +PREHOOK: query: select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11))
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@pcs_t1
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-08
    +#### A masked pattern was here ####
    +POSTHOOK: query: select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11))
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@pcs_t1
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
    +#### A masked pattern was here ####
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +2000-04-08
    +PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key, rand(100)) in (struct('2000-04-08',1,0.2), struct('2000-04-09',2,0.3))
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key, rand(100)) in (struct('2000-04-08',1,0.2), struct('2000-04-09',2,0.3))
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_TABREF
    + TOK_TABNAME
    + pcs_t1
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + TOK_TABLE_OR_COL
    + ds
    + TOK_WHERE
    + TOK_FUNCTION
    + in
    + TOK_FUNCTION
    + struct
    + TOK_TABLE_OR_COL
    + ds
    + TOK_TABLE_OR_COL
    + key
    + TOK_FUNCTION
    + rand
    + 100
    + TOK_FUNCTION
    + struct
    + '2000-04-08'
    + 1
    + 0.2
    + TOK_FUNCTION
    + struct
    + '2000-04-09'
    + 2
    + 0.3
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: pcs_t1
    + Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: (struct(ds,key,rand(100))) IN (const struct('2000-04-08',1,0.2), const struct('2000-04-09',2,0.3)) (type: boolean)
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: ds (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + columns _col0
    + columns.types string
    + escape.delim \
    + hive.serialization.extend.additional.nesting.levels true
    + serialization.escape.crlf true
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: ds=2000-04-08
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-08
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    +#### A masked pattern was here ####
    + Partition
    + base file name: ds=2000-04-09
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-09
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    + Truncated Path -> Alias:
    + /pcs_t1/ds=2000-04-08 [pcs_t1]
    + /pcs_t1/ds=2000-04-09 [pcs_t1]
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3))
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3))
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_TABREF
    + TOK_TABNAME
    + pcs_t1
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + TOK_TABLE_OR_COL
    + ds
    + TOK_WHERE
    + TOK_FUNCTION
    + in
    + TOK_FUNCTION
    + struct
    + or
    + =
    + TOK_TABLE_OR_COL
    + ds
    + '2000-04-08'
    + =
    + TOK_TABLE_OR_COL
    + key
    + 2
    + TOK_TABLE_OR_COL
    + key
    + TOK_FUNCTION
    + struct
    + true
    + 2
    + TOK_FUNCTION
    + struct
    + false
    + 3
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-0 is a root stage
    +
    +STAGE PLANS:
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Partition Description:
    + Partition
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-08
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    + Partition
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-09
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    + Partition
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-10
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    + Processor Tree:
    + TableScan
    + alias: pcs_t1
    + Statistics: Num rows: 60 Data size: 480 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: (struct(((ds = '2000-04-08') or (key = 2)),key)) IN (const struct(true,2), const struct(false,3)) (type: boolean)
    + Statistics: Num rows: 30 Data size: 240 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: ds (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 30 Data size: 240 Basic stats: COMPLETE Column stats: NONE
    + ListSink
    +
    +PREHOOK: query: select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3))
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@pcs_t1
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-08
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-09
    +PREHOOK: Input: default@pcs_t1@ds=2000-04-10
    +#### A masked pattern was here ####
    +POSTHOOK: query: select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3))
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@pcs_t1
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
    +POSTHOOK: Input: default@pcs_t1@ds=2000-04-10
    +#### A masked pattern was here ####
    +2000-04-08
    +2000-04-09
    +2000-04-10
    +PREHOOK: query: explain extended select ds from pcs_t1 where key = 3 or (struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3)) and key+5 > 0)
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain extended select ds from pcs_t1 where key = 3 or (struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3)) and key+5 > 0)
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_TABREF
    + TOK_TABNAME
    + pcs_t1
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + TOK_TABLE_OR_COL
    + ds
    + TOK_WHERE
    + or
    + =
    + TOK_TABLE_OR_COL
    + key
    + 3
    + and
    + TOK_FUNCTION
    + in
    + TOK_FUNCTION
    + struct
    + or
    + =
    + TOK_TABLE_OR_COL
    + ds
    + '2000-04-08'
    + =
    + TOK_TABLE_OR_COL
    + key
    + 2
    + TOK_TABLE_OR_COL
    + key
    + TOK_FUNCTION
    + struct
    + true
    + 2
    + TOK_FUNCTION
    + struct
    + false
    + 3
    + >
    + +
    + TOK_TABLE_OR_COL
    + key
    + 5
    + 0
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-0 is a root stage
    +
    +STAGE PLANS:
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Partition Description:
    + Partition
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-08
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    + Partition
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-09
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    + Partition
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2000-04-10
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + numFiles 1
    + numRows 20
    + partition_columns ds
    + partition_columns.types string
    + rawDataSize 160
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 180
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments
    + columns.types int:string
    +#### A masked pattern was here ####
    + name default.pcs_t1
    + partition_columns ds
    + partition_columns.types string
    + serialization.ddl struct pcs_t1 { i32 key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.pcs_t1
    + name: default.pcs_t1
    + Processor Tree:
    + TableScan
    + alias: pcs_t1
    + Statistics: Num rows: 60 Data size: 480 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: ((key = 3) or ((struct(((ds = '2000-04-08') or (key = 2)),key)) IN (const struct(true,2), const struct(false,3)) and ((key + 5) > 0))) (type: boolean)
    + Statistics: Num rows: 40 Data size: 320 Basic stats: COMP

    <TRUNCATED>
  • Jxiang at Nov 6, 2015 at 5:32 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/pointlookup.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/pointlookup.q.out b/ql/src/test/results/clientpositive/pointlookup.q.out
    index 7e19be4..a99b388 100644
    --- a/ql/src/test/results/clientpositive/pointlookup.q.out
    +++ b/ql/src/test/results/clientpositive/pointlookup.q.out
    @@ -176,15 +176,15 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((value) IN ('1', '3', '5', '6', '8') and (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3'))) (type: boolean)
    - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
    + predicate: (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3')) (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string)
                      outputColumnNames: _col0
    - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
    - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
    index 7e28c77..792ccaf 100644
    --- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
    +++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
    @@ -1236,21 +1236,6 @@ STAGE PLANS:
                            sort order: +
                            Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                            Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator
    - keys: _col0 (type: double)
    - mode: hash
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Dynamic Partitioning Event Operator
    - Target Input: srcpart
    - Partition key expr: UDFToDouble(hr)
    - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Target column: hr
    - Target Vertex: Map 1
              Reducer 2
                  Reduce Operator Tree:
                    Merge Join Operator
    @@ -3944,21 +3929,6 @@ STAGE PLANS:
                            sort order: +
                            Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                            Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator
    - keys: _col0 (type: double)
    - mode: hash
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Dynamic Partitioning Event Operator
    - Target Input: srcpart
    - Partition key expr: UDFToDouble(hr)
    - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Target column: hr
    - Target Vertex: Map 1
              Reducer 2
                  Reduce Operator Tree:
                    Group By Operator
    @@ -5063,21 +5033,6 @@ STAGE PLANS:
                                Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                                Target column: ds
                                Target Vertex: Map 1
    - Select Operator
    - expressions: UDFToDouble(hr) (type: double)
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator
    - keys: _col0 (type: double)
    - mode: hash
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
    - Dynamic Partitioning Event Operator
    - Target Input: srcpart_orc
    - Partition key expr: UDFToDouble(hr)
    - Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
    - Target column: hr
    - Target Vertex: Map 1
              Reducer 2
                  Reduce Operator Tree:
                    Group By Operator

    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
    index 1103e80..da2033b 100644
    --- a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
    @@ -1251,21 +1251,6 @@ STAGE PLANS:
                            sort order: +
                            Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator
    - keys: _col0 (type: double)
    - mode: hash
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Dynamic Partitioning Event Operator
    - Target Input: srcpart
    - Partition key expr: UDFToDouble(hr)
    - Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Target column: hr
    - Target Vertex: Map 1
              Reducer 2
                  Reduce Operator Tree:
                    Merge Join Operator
    @@ -3995,21 +3980,6 @@ STAGE PLANS:
                            sort order: +
                            Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator
    - keys: _col0 (type: double)
    - mode: hash
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Dynamic Partitioning Event Operator
    - Target Input: srcpart
    - Partition key expr: UDFToDouble(hr)
    - Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Target column: hr
    - Target Vertex: Map 1
              Reducer 2
                  Execution mode: vectorized
                  Reduce Operator Tree:
    @@ -5131,21 +5101,6 @@ STAGE PLANS:
                                Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                                Target column: ds
                                Target Vertex: Map 1
    - Select Operator
    - expressions: UDFToDouble(hr) (type: double)
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator
    - keys: _col0 (type: double)
    - mode: hash
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
    - Dynamic Partitioning Event Operator
    - Target Input: srcpart_orc
    - Partition key expr: UDFToDouble(hr)
    - Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
    - Target column: hr
    - Target Vertex: Map 1
              Reducer 2
                  Reduce Operator Tree:
                    Group By Operator
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-12209: Vectorize simple UDFs with null arguments (Gopal V, reviewed by Sergey Shelukhin)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/db2c5009
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/db2c5009
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/db2c5009

    Branch: refs/heads/master-fixed
    Commit: db2c5009b243aeb5be09225b03476d1c12ebef84
    Parents: 492a10f
    Author: Gopal V <gopalv@apache.org>
    Authored: Mon Nov 2 19:42:35 2015 -0800
    Committer: Gopal V <gopalv@apache.org>
    Committed: Mon Nov 2 19:42:35 2015 -0800

    ----------------------------------------------------------------------
      .../ql/exec/vector/VectorizationContext.java | 7 +-
      .../ql/exec/vector/udf/VectorUDFArgDesc.java | 19 ++--
      .../queries/clientpositive/vectorized_case.q | 19 ++++
      .../clientpositive/spark/vectorized_case.q.out | 109 +++++++++++++++++--
      .../clientpositive/tez/vectorized_case.q.out | 109 +++++++++++++++++--
      .../clientpositive/vectorized_case.q.out | 69 ++++++++++++
      6 files changed, 301 insertions(+), 31 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/db2c5009/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
    index 3489c9c..e7a829e 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
    @@ -2022,12 +2022,7 @@ public class VectorizationContext {
              variableArgPositions.add(i);
              argDescs[i].setVariable(getInputColumnIndex(((ExprNodeColumnDesc) child).getColumn()));
            } else if (child instanceof ExprNodeConstantDesc) {
    - if (((ExprNodeConstantDesc) child).getValue() == null) {
    - // cannot handle constant null at the moment
    - throw new HiveException("Unable to vectorize custom UDF. Custom udf containing "
    - + "constant null argument cannot be currently vectorized.");
    - }
    - // this is a constant
    + // this is a constant (or null)
              argDescs[i].setConstant((ExprNodeConstantDesc) child);
            } else {
              throw new HiveException("Unable to vectorize custom UDF. Encountered unsupported expr desc : "

    http://git-wip-us.apache.org/repos/asf/hive/blob/db2c5009/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFArgDesc.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFArgDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFArgDesc.java
    index e113980..6abfe63 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFArgDesc.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFArgDesc.java
    @@ -59,13 +59,18 @@ public class VectorUDFArgDesc implements Serializable {
         * during initialization.
         */
        public void prepareConstant() {
    - PrimitiveCategory pc = ((PrimitiveTypeInfo) constExpr.getTypeInfo())
    - .getPrimitiveCategory();
    -
    - // Convert from Java to Writable
    - Object writableValue = PrimitiveObjectInspectorFactory
    - .getPrimitiveJavaObjectInspector(pc).getPrimitiveWritableObject(
    - constExpr.getValue());
    + final Object writableValue;
    + if (constExpr != null) {
    + PrimitiveCategory pc = ((PrimitiveTypeInfo) constExpr.getTypeInfo())
    + .getPrimitiveCategory();
    +
    + // Convert from Java to Writable
    + writableValue = PrimitiveObjectInspectorFactory
    + .getPrimitiveJavaObjectInspector(pc).getPrimitiveWritableObject(
    + constExpr.getValue());
    + } else {
    + writableValue = null;
    + }

          constObjVal = new GenericUDF.DeferredJavaObject(writableValue);
        }
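
     For illustration only, here is a minimal plain-Java sketch of the null-guard flow that the hunk above introduces in prepareConstant(): convert the constant to a Writable only when a constant expression is actually present, and otherwise hand the UDF a null deferred value instead of failing vectorization. ConstExpr, DeferredValue, and toWritable are simplified stand-ins for this sketch, not Hive's real classes or APIs.

     // Self-contained sketch of the null-guarded constant preparation pattern.
     // ConstExpr and DeferredValue are hypothetical stand-ins, not Hive classes.
     public class NullConstantSketch {

       /** Stand-in for a constant expression node that may carry a value. */
       static final class ConstExpr {
         private final Object value;
         ConstExpr(Object value) { this.value = value; }
         Object getValue() { return value; }
       }

       /** Stand-in for the deferred object handed to a UDF at evaluation time. */
       static final class DeferredValue {
         final Object wrapped;
         DeferredValue(Object wrapped) { this.wrapped = wrapped; }
       }

       /** Prepare the constant argument, tolerating a missing (NULL) constant. */
       static DeferredValue prepareConstant(ConstExpr constExpr) {
         final Object converted;
         if (constExpr != null) {
           // Only convert when a real constant is present; in Hive this step is
           // the Java-to-Writable conversion via a primitive object inspector.
           converted = toWritable(constExpr.getValue());
         } else {
           // Constant NULL argument: pass null through instead of throwing.
           converted = null;
         }
         return new DeferredValue(converted);
       }

       /** Placeholder for the Java-to-Writable conversion. */
       static Object toWritable(Object javaValue) {
         return javaValue; // identity here; Hive would wrap it in a Writable
       }

       public static void main(String[] args) {
         System.out.println(prepareConstant(new ConstExpr("a")).wrapped); // a
         System.out.println(prepareConstant(null).wrapped);               // null
       }
     }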

    http://git-wip-us.apache.org/repos/asf/hive/blob/db2c5009/ql/src/test/queries/clientpositive/vectorized_case.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/vectorized_case.q b/ql/src/test/queries/clientpositive/vectorized_case.q
    index 8799fbb..e74bf82 100644
    --- a/ql/src/test/queries/clientpositive/vectorized_case.q
    +++ b/ql/src/test/queries/clientpositive/vectorized_case.q
    @@ -1,4 +1,5 @@
      set hive.explain.user=false;
    +set hive.fetch.task.conversion=none;
      set hive.vectorized.execution.enabled = true
      ;
      explain
    @@ -36,3 +37,21 @@ where csmallint = 418
      or csmallint = 12205
      or csmallint = 10583
      ;
    +explain
    +select
    + csmallint,
    + case
    + when csmallint = 418 then "a"
    + when csmallint = 12205 then "b"
    + else null
    + end,
    + case csmallint
    + when 418 then "a"
    + when 12205 then null
    + else "c"
    + end
    +from alltypesorc
    +where csmallint = 418
    +or csmallint = 12205
    +or csmallint = 10583
    +;

    http://git-wip-us.apache.org/repos/asf/hive/blob/db2c5009/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
    index c2250e6..ade9cfe 100644
    --- a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
    +++ b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
    @@ -35,21 +35,40 @@ or csmallint = 12205
      or csmallint = 10583
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
    - Stage-0 is a root stage
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1

      STAGE PLANS:
    + Stage: Stage-1
    + Spark
    +#### A masked pattern was here ####
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: alltypesorc
    + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
    + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
    + outputColumnNames: _col0, _col1, _col2
    + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + Execution mode: vectorized
    +
        Stage: Stage-0
          Fetch Operator
            limit: -1
            Processor Tree:
    - TableScan
    - alias: alltypesorc
    - Filter Operator
    - predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
    - Select Operator
    - expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
    - outputColumnNames: _col0, _col1, _col2
    - ListSink
    + ListSink

      PREHOOK: query: select
        csmallint,
    @@ -93,3 +112,75 @@ POSTHOOK: Input: default@alltypesorc
      10583 c c
      418 a a
      12205 b b
    +PREHOOK: query: explain
    +select
    + csmallint,
    + case
    + when csmallint = 418 then "a"
    + when csmallint = 12205 then "b"
    + else null
    + end,
    + case csmallint
    + when 418 then "a"
    + when 12205 then null
    + else "c"
    + end
    +from alltypesorc
    +where csmallint = 418
    +or csmallint = 12205
    +or csmallint = 10583
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +select
    + csmallint,
    + case
    + when csmallint = 418 then "a"
    + when csmallint = 12205 then "b"
    + else null
    + end,
    + case csmallint
    + when 418 then "a"
    + when 12205 then null
    + else "c"
    + end
    +from alltypesorc
    +where csmallint = 418
    +or csmallint = 12205
    +or csmallint = 10583
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Spark
    +#### A masked pattern was here ####
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: alltypesorc
    + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
    + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE (null) END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN (null) ELSE ('c') END (type: string)
    + outputColumnNames: _col0, _col1, _col2
    + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + Execution mode: vectorized
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +

    http://git-wip-us.apache.org/repos/asf/hive/blob/db2c5009/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vectorized_case.q.out b/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
    index c2250e6..136714d 100644
    --- a/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
    @@ -35,21 +35,40 @@ or csmallint = 12205
      or csmallint = 10583
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
    - Stage-0 is a root stage
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1

      STAGE PLANS:
    + Stage: Stage-1
    + Tez
    +#### A masked pattern was here ####
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: alltypesorc
    + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
    + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
    + outputColumnNames: _col0, _col1, _col2
    + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + Execution mode: vectorized
    +
        Stage: Stage-0
          Fetch Operator
            limit: -1
            Processor Tree:
    - TableScan
    - alias: alltypesorc
    - Filter Operator
    - predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
    - Select Operator
    - expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
    - outputColumnNames: _col0, _col1, _col2
    - ListSink
    + ListSink

      PREHOOK: query: select
        csmallint,
    @@ -93,3 +112,75 @@ POSTHOOK: Input: default@alltypesorc
      10583 c c
      418 a a
      12205 b b
    +PREHOOK: query: explain
    +select
    + csmallint,
    + case
    + when csmallint = 418 then "a"
    + when csmallint = 12205 then "b"
    + else null
    + end,
    + case csmallint
    + when 418 then "a"
    + when 12205 then null
    + else "c"
    + end
    +from alltypesorc
    +where csmallint = 418
    +or csmallint = 12205
    +or csmallint = 10583
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +select
    + csmallint,
    + case
    + when csmallint = 418 then "a"
    + when csmallint = 12205 then "b"
    + else null
    + end,
    + case csmallint
    + when 418 then "a"
    + when 12205 then null
    + else "c"
    + end
    +from alltypesorc
    +where csmallint = 418
    +or csmallint = 12205
    +or csmallint = 10583
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Tez
    +#### A masked pattern was here ####
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: alltypesorc
    + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
    + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE (null) END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN (null) ELSE ('c') END (type: string)
    + outputColumnNames: _col0, _col1, _col2
    + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + Execution mode: vectorized
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +

    http://git-wip-us.apache.org/repos/asf/hive/blob/db2c5009/ql/src/test/results/clientpositive/vectorized_case.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/vectorized_case.q.out b/ql/src/test/results/clientpositive/vectorized_case.q.out
    index 73bf12d..347a93e 100644
    --- a/ql/src/test/results/clientpositive/vectorized_case.q.out
    +++ b/ql/src/test/results/clientpositive/vectorized_case.q.out
    @@ -109,3 +109,72 @@ POSTHOOK: Input: default@alltypesorc
      10583 c c
      418 a a
      12205 b b
    +PREHOOK: query: explain
    +select
    + csmallint,
    + case
    + when csmallint = 418 then "a"
    + when csmallint = 12205 then "b"
    + else null
    + end,
    + case csmallint
    + when 418 then "a"
    + when 12205 then null
    + else "c"
    + end
    +from alltypesorc
    +where csmallint = 418
    +or csmallint = 12205
    +or csmallint = 10583
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +select
    + csmallint,
    + case
    + when csmallint = 418 then "a"
    + when csmallint = 12205 then "b"
    + else null
    + end,
    + case csmallint
    + when 418 then "a"
    + when 12205 then null
    + else "c"
    + end
    +from alltypesorc
    +where csmallint = 418
    +or csmallint = 12205
    +or csmallint = 10583
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: alltypesorc
    + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
    + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE (null) END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN (null) ELSE ('c') END (type: string)
    + outputColumnNames: _col0, _col1, _col2
    + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + Execution mode: vectorized
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-11966 JDBC Driver parsing error when reading principal from ZooKeeper (Vaibhav Gumashta via Alan Gates)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/de1fe68b
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/de1fe68b
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/de1fe68b

    Branch: refs/heads/master-fixed
    Commit: de1fe68b90ff9e29a21095035c7ed02dbbf35f26
    Parents: 1357f63
    Author: Alan Gates <gates@hortonworks.com>
    Authored: Mon Nov 2 16:01:03 2015 -0800
    Committer: Alan Gates <gates@hortonworks.com>
    Committed: Mon Nov 2 16:01:03 2015 -0800

    ----------------------------------------------------------------------
      jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java | 2 +-
      1 file changed, 1 insertion(+), 1 deletion(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/de1fe68b/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java
    ----------------------------------------------------------------------
    diff --git a/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java b/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java
    index 7195515..1ca77a1 100644
    --- a/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java
    +++ b/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java
    @@ -159,7 +159,7 @@ class ZooKeeperHiveClientHelper {
              }
              // KERBEROS
              // If delegation token is passed from the client side, do not set the principal
    - if (matcher.group(2).equalsIgnoreCase("hive.server2.authentication.kerberos.principal")
    + if (matcher.group(1).equalsIgnoreCase("hive.server2.authentication.kerberos.principal")
                  && !(connParams.getSessionVars().containsKey(JdbcConnectionParams.AUTH_TYPE) && connParams
                      .getSessionVars().get(JdbcConnectionParams.AUTH_TYPE)
                      .equalsIgnoreCase(JdbcConnectionParams.AUTH_TOKEN))
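    For context, a minimal sketch of why the capture-group index matters when a key=value pair is parsed with a regex. The pattern below is an illustrative assumption, not the one used in ZooKeeperHiveClientHelper: with a key-first pattern, group(1) is the configuration name and group(2) is its value, so comparing the principal key name against group(2) would never match.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class KeyValueGroupDemo {
      public static void main(String[] args) {
        // Hypothetical pattern: group(1) captures the key, group(2) the value.
        Pattern kv = Pattern.compile("([^=;]+)=([^;]*);?");
        Matcher matcher = kv.matcher(
            "hive.server2.authentication.kerberos.principal=hive/_HOST@EXAMPLE.COM;");
        if (matcher.find()) {
          // Testing the principal name against group(2) would compare it to the value,
          // so the principal entry would never be recognized.
          System.out.println("key   = " + matcher.group(1));
          System.out.println("value = " + matcher.group(2));
        }
      }
    }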
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-11293 HiveConnection.setAutoCommit(true) throws exception (Michał Węgrzyn and Alan Gates, reviewed by Thejas Nair)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1357f633
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1357f633
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1357f633

    Branch: refs/heads/master-fixed
    Commit: 1357f6338796600fe37b81bb11600ad56da3d4e2
    Parents: 1ad1dc8
    Author: Alan Gates <gates@hortonworks.com>
    Authored: Mon Nov 2 15:53:07 2015 -0800
    Committer: Alan Gates <gates@hortonworks.com>
    Committed: Mon Nov 2 15:53:07 2015 -0800

    ----------------------------------------------------------------------
      .../org/apache/hive/jdbc/TestJdbcDriver2.java | 89 ++++++++++++------
      .../org/apache/hive/jdbc/HiveConnection.java | 96 +++++++++++---------
      2 files changed, 113 insertions(+), 72 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/1357f633/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
    ----------------------------------------------------------------------
    diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
    index 995a33d..ced454f 100644
    --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
    +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
    @@ -18,14 +18,28 @@

      package org.apache.hive.jdbc;

    -import static org.apache.hadoop.hive.conf.SystemVariables.SET_COLUMN_NAME;
    -import static org.apache.hadoop.hive.ql.exec.ExplainTask.EXPL_COLUMN_NAME;
    -import static org.junit.Assert.assertEquals;
    -import static org.junit.Assert.assertFalse;
    -import static org.junit.Assert.assertNotNull;
    -import static org.junit.Assert.assertNull;
    -import static org.junit.Assert.assertTrue;
    -import static org.junit.Assert.fail;
    +import org.apache.hadoop.fs.Path;
    +import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
    +import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
    +import org.apache.hadoop.hive.conf.HiveConf;
    +import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
    +import org.apache.hadoop.hive.metastore.TableType;
    +import org.apache.hadoop.hive.ql.exec.UDF;
    +import org.apache.hadoop.hive.ql.processors.DfsProcessor;
    +import org.apache.hive.common.util.HiveVersionInfo;
    +import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
    +import org.apache.hive.service.cli.operation.ClassicTableTypeMapping;
    +import org.apache.hive.service.cli.operation.ClassicTableTypeMapping.ClassicTableTypes;
    +import org.apache.hive.service.cli.operation.HiveTableTypeMapping;
    +import org.apache.hive.service.cli.operation.TableTypeMappingFactory.TableTypeMappings;
    +import org.junit.After;
    +import org.junit.Before;
    +import org.junit.BeforeClass;
    +import org.junit.Rule;
    +import org.junit.Test;
    +import org.junit.rules.ExpectedException;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;

      import java.io.InputStream;
      import java.sql.Connection;
    @@ -36,6 +50,7 @@ import java.sql.PreparedStatement;
      import java.sql.ResultSet;
      import java.sql.ResultSetMetaData;
      import java.sql.SQLException;
    +import java.sql.SQLWarning;
      import java.sql.Statement;
      import java.sql.Timestamp;
      import java.sql.Types;
    @@ -49,26 +64,14 @@ import java.util.Properties;
      import java.util.Set;
      import java.util.regex.Pattern;

    -import org.slf4j.Logger;
    -import org.slf4j.LoggerFactory;
    -import org.apache.hadoop.fs.Path;
    -import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
    -import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
    -import org.apache.hadoop.hive.conf.HiveConf;
    -import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
    -import org.apache.hadoop.hive.metastore.TableType;
    -import org.apache.hadoop.hive.ql.exec.UDF;
    -import org.apache.hadoop.hive.ql.processors.DfsProcessor;
    -import org.apache.hive.common.util.HiveVersionInfo;
    -import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
    -import org.apache.hive.service.cli.operation.ClassicTableTypeMapping;
    -import org.apache.hive.service.cli.operation.ClassicTableTypeMapping.ClassicTableTypes;
    -import org.apache.hive.service.cli.operation.HiveTableTypeMapping;
    -import org.apache.hive.service.cli.operation.TableTypeMappingFactory.TableTypeMappings;
    -import org.junit.After;
    -import org.junit.Before;
    -import org.junit.BeforeClass;
    -import org.junit.Test;
    +import static org.apache.hadoop.hive.conf.SystemVariables.SET_COLUMN_NAME;
    +import static org.apache.hadoop.hive.ql.exec.ExplainTask.EXPL_COLUMN_NAME;
    +import static org.junit.Assert.assertEquals;
    +import static org.junit.Assert.assertFalse;
    +import static org.junit.Assert.assertNotNull;
    +import static org.junit.Assert.assertNull;
    +import static org.junit.Assert.assertTrue;
    +import static org.junit.Assert.fail;


      /**
    @@ -96,6 +99,8 @@ public class TestJdbcDriver2 {
        private static boolean standAloneServer = false;
        private static final float floatCompareDelta = 0.0001f;

    + @Rule public ExpectedException thrown = ExpectedException.none();
    +
        public TestJdbcDriver2() {
          conf = new HiveConf(TestJdbcDriver2.class);
          dataFileDir = conf.get("test.data.files").replace('\\', '/')
    @@ -2414,4 +2419,32 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
            }
          }
        }
    +
    + @Test
    + public void testAutoCommit() throws Exception {
    + con.clearWarnings();
    + con.setAutoCommit(true);
    + assertNull(con.getWarnings());
    + con.setAutoCommit(false);
    + SQLWarning warning = con.getWarnings();
    + assertNotNull(warning);
    + assertEquals("Hive does not support autoCommit=false", warning.getMessage());
    + assertNull(warning.getNextWarning());
    + con.clearWarnings();
    + }
    +
    + @Test
    + public void setAutoCommitOnClosedConnection() throws Exception {
    + Connection mycon = getConnection("");
    + try {
    + mycon.setAutoCommit(true);
    + mycon.close();
    + thrown.expect(SQLException.class);
    + thrown.expectMessage("Connection is closed");
    + mycon.setAutoCommit(true);
    + } finally {
    + mycon.close();
    + }
    +
    + }
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/1357f633/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
    ----------------------------------------------------------------------
    diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
    index 920d50f..e38c585 100644
    --- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
    +++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
    @@ -18,48 +18,6 @@

      package org.apache.hive.jdbc;

    -import java.io.FileInputStream;
    -import java.io.IOException;
    -import java.lang.reflect.InvocationHandler;
    -import java.lang.reflect.InvocationTargetException;
    -import java.lang.reflect.Method;
    -import java.lang.reflect.Proxy;
    -import java.security.KeyStore;
    -import java.security.SecureRandom;
    -import java.sql.Array;
    -import java.sql.Blob;
    -import java.sql.CallableStatement;
    -import java.sql.Clob;
    -import java.sql.Connection;
    -import java.sql.DatabaseMetaData;
    -import java.sql.DriverManager;
    -import java.sql.NClob;
    -import java.sql.PreparedStatement;
    -import java.sql.ResultSet;
    -import java.sql.SQLClientInfoException;
    -import java.sql.SQLException;
    -import java.sql.SQLWarning;
    -import java.sql.SQLXML;
    -import java.sql.Savepoint;
    -import java.sql.Statement;
    -import java.sql.Struct;
    -import java.util.HashMap;
    -import java.util.LinkedList;
    -import java.util.List;
    -import java.util.Map;
    -import java.util.Map.Entry;
    -import java.util.Properties;
    -import java.util.concurrent.Executor;
    -import java.util.concurrent.TimeUnit;
    -
    -import javax.net.ssl.KeyManagerFactory;
    -import javax.net.ssl.SSLContext;
    -import javax.net.ssl.TrustManagerFactory;
    -import javax.security.sasl.Sasl;
    -import javax.security.sasl.SaslException;
    -
    -import org.apache.http.impl.client.CloseableHttpClient;
    -import org.apache.http.protocol.HttpContext;
      import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
      import org.apache.hive.service.auth.HiveAuthFactory;
      import org.apache.hive.service.auth.KerberosSaslHelper;
    @@ -87,9 +45,11 @@ import org.apache.http.config.RegistryBuilder;
      import org.apache.http.conn.socket.ConnectionSocketFactory;
      import org.apache.http.conn.ssl.SSLSocketFactory;
      import org.apache.http.impl.client.BasicCookieStore;
    +import org.apache.http.impl.client.CloseableHttpClient;
      import org.apache.http.impl.client.HttpClientBuilder;
      import org.apache.http.impl.client.HttpClients;
      import org.apache.http.impl.conn.BasicHttpClientConnectionManager;
    +import org.apache.http.protocol.HttpContext;
      import org.apache.thrift.TException;
      import org.apache.thrift.protocol.TBinaryProtocol;
      import org.apache.thrift.transport.THttpClient;
    @@ -98,6 +58,45 @@ import org.apache.thrift.transport.TTransportException;
      import org.slf4j.Logger;
      import org.slf4j.LoggerFactory;

    +import javax.net.ssl.KeyManagerFactory;
    +import javax.net.ssl.SSLContext;
    +import javax.net.ssl.TrustManagerFactory;
    +import javax.security.sasl.Sasl;
    +import javax.security.sasl.SaslException;
    +import java.io.FileInputStream;
    +import java.io.IOException;
    +import java.lang.reflect.InvocationHandler;
    +import java.lang.reflect.InvocationTargetException;
    +import java.lang.reflect.Method;
    +import java.lang.reflect.Proxy;
    +import java.security.KeyStore;
    +import java.security.SecureRandom;
    +import java.sql.Array;
    +import java.sql.Blob;
    +import java.sql.CallableStatement;
    +import java.sql.Clob;
    +import java.sql.Connection;
    +import java.sql.DatabaseMetaData;
    +import java.sql.DriverManager;
    +import java.sql.NClob;
    +import java.sql.PreparedStatement;
    +import java.sql.ResultSet;
    +import java.sql.SQLClientInfoException;
    +import java.sql.SQLException;
    +import java.sql.SQLWarning;
    +import java.sql.SQLXML;
    +import java.sql.Savepoint;
    +import java.sql.Statement;
    +import java.sql.Struct;
    +import java.util.HashMap;
    +import java.util.LinkedList;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.Map.Entry;
    +import java.util.Properties;
    +import java.util.concurrent.Executor;
    +import java.util.concurrent.TimeUnit;
    +
      /**
       * HiveConnection.
       *
    @@ -1216,8 +1215,17 @@ public class HiveConnection implements java.sql.Connection {

        @Override
        public void setAutoCommit(boolean autoCommit) throws SQLException {
    - if (autoCommit) {
    - throw new SQLException("enabling autocommit is not supported");
    + // Per JDBC spec, if the connection is closed a SQLException should be thrown.
    + if(isClosed) {
    + throw new SQLException("Connection is closed");
    + }
    + // The auto-commit mode is always enabled for this connection. Per JDBC spec,
    + // if setAutoCommit is called and the auto-commit mode is not changed, the call is a no-op.
    + if (!autoCommit) {
    + LOG.warn("Request to set autoCommit to false; Hive does not support autoCommit=false.");
    + SQLWarning warning = new SQLWarning("Hive does not support autoCommit=false");
    + if (warningChain == null) warningChain = warning;
    + else warningChain.setNextWarning(warning);
          }
        }
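    A minimal client-side sketch of the new behavior, assuming the patched driver and a placeholder HiveServer2 URL: setAutoCommit(true) is now a no-op (auto-commit is always on), setAutoCommit(false) reports an SQLWarning instead of throwing, and calling the method on a closed connection raises SQLException.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLWarning;

    public class AutoCommitDemo {
      public static void main(String[] args) throws Exception {
        // Placeholder JDBC URL; adjust host/port/database for a real HiveServer2.
        try (Connection con = DriverManager.getConnection("jdbc:hive2://localhost:10000/default")) {
          con.setAutoCommit(true);   // no-op: auto-commit is always enabled
          con.setAutoCommit(false);  // no longer throws; records a warning instead
          for (SQLWarning w = con.getWarnings(); w != null; w = w.getNextWarning()) {
            System.out.println(w.getMessage()); // "Hive does not support autoCommit=false"
          }
        }
      }
    }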
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-12202 NPE thrown when reading legacy ACID delta files (Elliot West via Eugene Koifman)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/02629e97
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/02629e97
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/02629e97

    Branch: refs/heads/master-fixed
    Commit: 02629e9794e228dcaa8d446423a256d75f71d6dd
    Parents: 47617d3
    Author: Eugene Koifman <ekoifman@hortonworks.com>
    Authored: Tue Nov 3 09:06:19 2015 -0800
    Committer: Eugene Koifman <ekoifman@hortonworks.com>
    Committed: Tue Nov 3 09:06:19 2015 -0800

    ----------------------------------------------------------------------
      .../org/apache/hadoop/hive/ql/io/AcidInputFormat.java | 14 +++++---------
      1 file changed, 5 insertions(+), 9 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/02629e97/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
    index 24506b7..7c7074d 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
    @@ -33,7 +33,6 @@ import java.io.DataInput;
      import java.io.DataOutput;
      import java.io.IOException;
      import java.util.ArrayList;
    -import java.util.Collections;
      import java.util.List;

      /**
    @@ -115,11 +114,14 @@ public interface AcidInputFormat<KEY extends WritableComparable, VALUE>
          private List<Integer> stmtIds;

          public DeltaMetaData() {
    - this(0,0,null);
    + this(0,0,new ArrayList<Integer>());
          }
          DeltaMetaData(long minTxnId, long maxTxnId, List<Integer> stmtIds) {
            this.minTxnId = minTxnId;
            this.maxTxnId = maxTxnId;
    + if (stmtIds == null) {
    + throw new IllegalArgumentException("stmtIds == null");
    + }
            this.stmtIds = stmtIds;
          }
          long getMinTxnId() {
    @@ -136,9 +138,6 @@ public interface AcidInputFormat<KEY extends WritableComparable, VALUE>
            out.writeLong(minTxnId);
            out.writeLong(maxTxnId);
            out.writeInt(stmtIds.size());
    - if(stmtIds == null) {
    - return;
    - }
            for(Integer id : stmtIds) {
              out.writeInt(id);
            }
    @@ -147,11 +146,8 @@ public interface AcidInputFormat<KEY extends WritableComparable, VALUE>
          public void readFields(DataInput in) throws IOException {
            minTxnId = in.readLong();
            maxTxnId = in.readLong();
    + stmtIds.clear();
            int numStatements = in.readInt();
    - if(numStatements <= 0) {
    - return;
    - }
    - stmtIds = new ArrayList<>();
            for(int i = 0; i < numStatements; i++) {
              stmtIds.add(in.readInt());
            }
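    A simplified, self-contained stand-in (not the actual AcidInputFormat.DeltaMetaData) illustrating the failure mode the patch removes: the no-arg constructor used to pass null for stmtIds, so write() hit a NullPointerException on stmtIds.size() for legacy deltas; defaulting to an empty list and failing fast on null avoids both.

    import java.util.ArrayList;
    import java.util.List;

    public class DeltaMetaDemo {
      // Simplified stand-in for the serialized field in DeltaMetaData.
      private final List<Integer> stmtIds;

      DeltaMetaDemo(List<Integer> stmtIds) {
        // Fail fast at construction time instead of NPE-ing later during serialization.
        if (stmtIds == null) {
          throw new IllegalArgumentException("stmtIds == null");
        }
        this.stmtIds = stmtIds;
      }

      DeltaMetaDemo() {
        // An empty list keeps size() safe for legacy deltas with no statement ids.
        this(new ArrayList<Integer>());
      }

      int serializedStatementCount() {
        return stmtIds.size(); // was the NPE site when stmtIds could be null
      }

      public static void main(String[] args) {
        System.out.println(new DeltaMetaDemo().serializedStatementCount()); // prints 0
      }
    }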
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-12238: Vectorization: Thread-safety errors in VectorUDFDate (Gopal V, reviewed by Gunther Hagleitner)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d7c04859
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d7c04859
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d7c04859

    Branch: refs/heads/master-fixed
    Commit: d7c04859e1903cd6ed38678e8dae6b453c34b7bb
    Parents: ad12765
    Author: Gopal V <gopalv@apache.org>
    Authored: Mon Nov 2 19:56:08 2015 -0800
    Committer: Gopal V <gopalv@apache.org>
    Committed: Mon Nov 2 19:56:08 2015 -0800

    ----------------------------------------------------------------------
      .../vector/expressions/VectorUDFDateString.java | 4 +-
      .../expressions/TestVectorDateExpressions.java | 71 +++++++++++++++++++-
      2 files changed, 72 insertions(+), 3 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/d7c04859/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateString.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateString.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateString.java
    index f1a5b93..e27ac6a 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateString.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateString.java
    @@ -22,6 +22,7 @@ import org.slf4j.Logger;
      import org.slf4j.LoggerFactory;
      import org.apache.hadoop.hive.ql.exec.vector.VectorGroupByOperator;
      import org.apache.hadoop.io.Text;
    +import org.apache.hive.common.util.DateUtils;

      import java.text.SimpleDateFormat;
      import java.util.Date;
    @@ -30,14 +31,13 @@ import java.text.ParseException;
      public class VectorUDFDateString extends StringUnaryUDF {
        private static final long serialVersionUID = 1L;

    - private transient static SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd");
    -
        private static final Logger LOG = LoggerFactory.getLogger(
            VectorUDFDateString.class.getName());

        public VectorUDFDateString(int colNum, int outputColumn) {
          super(colNum, outputColumn, new StringUnaryUDF.IUDFUnaryString() {
            Text t = new Text();
    + final transient SimpleDateFormat formatter = DateUtils.getDateFormat();

            @Override
            public Text evaluate(Text s) {

    http://git-wip-us.apache.org/repos/asf/hive/blob/d7c04859/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateExpressions.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateExpressions.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateExpressions.java
    index 6bd4be1..9c4a751 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateExpressions.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateExpressions.java
    @@ -18,8 +18,9 @@

      package org.apache.hadoop.hive.ql.exec.vector.expressions;

    -import junit.framework.Assert;
    +import org.junit.Assert;
      import org.apache.commons.lang.ArrayUtils;
    +import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
      import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
      import org.apache.hadoop.hive.ql.exec.vector.TestVectorizedRowBatch;
      import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
    @@ -31,15 +32,28 @@ import org.apache.hadoop.hive.serde2.io.DateWritable;
      import org.apache.hadoop.hive.serde2.io.TimestampWritable;
      import org.apache.hadoop.io.IntWritable;
      import org.apache.hadoop.io.LongWritable;
    +import org.junit.After;
    +import org.junit.Before;
      import org.junit.Test;
    +import org.junit.internal.runners.statements.Fail;
    +
    +import com.google.common.util.concurrent.ThreadFactoryBuilder;

      import java.sql.Timestamp;
      import java.util.ArrayList;
      import java.util.Calendar;
      import java.util.List;
      import java.util.Random;
    +import java.util.concurrent.Callable;
    +import java.util.concurrent.ExecutorService;
    +import java.util.concurrent.Executors;
    +import java.util.concurrent.Future;
    +import java.util.concurrent.ThreadFactory;

      public class TestVectorDateExpressions {
    +
    + private ExecutorService runner;
    +
        /* copied over from VectorUDFTimestampFieldLong */
        private TimestampWritable toTimestampWritable(long daysSinceEpoch) {
          Timestamp ts = new Timestamp(DateWritable.daysToMillis((int) daysSinceEpoch));
    @@ -412,6 +426,60 @@ public class TestVectorDateExpressions {
          verifyUDFWeekOfYear(batch);
        }

    + @Before
    + public void setUp() throws Exception {
    + runner =
    + Executors.newFixedThreadPool(3,
    + new ThreadFactoryBuilder().setNameFormat("date-tester-thread-%d").build());
    + }
    +
    + private static final class MultiThreadedDateFormatTest implements Callable<Void> {
    + @Override
    + public Void call() throws Exception {
    + int batchSize = 1024;
    + VectorUDFDateString udf = new VectorUDFDateString(0, 1);
    + VectorizedRowBatch batch = new VectorizedRowBatch(2, batchSize);
    + BytesColumnVector in = new BytesColumnVector(batchSize);
    + BytesColumnVector out = new BytesColumnVector(batchSize);
    + batch.cols[0] = in;
    + batch.cols[1] = out;
    + for (int i = 0; i < batchSize; i++) {
    + byte[] data = String.format("1999-%02d-%02d", 1 + (i % 12), 1 + (i % 15)).getBytes("UTF-8");
    + in.setRef(i, data, 0, data.length);
    + in.isNull[i] = false;
    + }
    + udf.evaluate(batch);
    + // bug if it throws an exception
    + return (Void) null;
    + }
    + }
    +
    + // 5s timeout
    + @Test(timeout = 5000)
    + public void testMultiThreadedVectorUDFDate() {
    + List<Callable<Void>> tasks = new ArrayList<Callable<Void>>();
    + for (int i = 0; i < 200; i++) {
    + tasks.add(new MultiThreadedDateFormatTest());
    + }
    + try {
    + List<Future<Void>> results = runner.invokeAll(tasks);
    + for (Future<Void> f : results) {
    + Assert.assertNull(f.get());
    + }
    + } catch (InterruptedException ioe) {
    + Assert.fail("Interrupted while running tests");
    + } catch (Exception e) {
    + Assert.fail("Multi threaded operations threw unexpected Exception: " + e.getMessage());
    + }
    + }
    +
    + @After
    + public void tearDown() throws Exception {
    + if (runner != null) {
    + runner.shutdownNow();
    + }
    + }
    +
        public static void main(String[] args) {
          TestVectorDateExpressions self = new TestVectorDateExpressions();
          self.testVectorUDFYear();
    @@ -419,5 +487,6 @@ public class TestVectorDateExpressions {
          self.testVectorUDFDayOfMonth();
          self.testVectorUDFWeekOfYear();
          self.testVectorUDFUnixTimeStamp();
    + self.testMultiThreadedVectorUDFDate();
        }
      }
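    The patch replaces the shared static SimpleDateFormat with a per-instance formatter obtained from DateUtils.getDateFormat(). A minimal, illustrative sketch of the underlying hazard (the class and field names below are assumptions, not Hive code): SimpleDateFormat keeps mutable parse state, so a single static instance shared across threads can corrupt concurrent parses, whereas a per-thread or per-instance formatter cannot.

    import java.text.ParseException;
    import java.text.SimpleDateFormat;

    public class DateFormatSafetyDemo {
      // Unsafe under concurrency: one shared instance with internal mutable state.
      private static final SimpleDateFormat SHARED = new SimpleDateFormat("yyyy-MM-dd");

      // Safe alternative: one formatter per thread.
      private static final ThreadLocal<SimpleDateFormat> PER_THREAD =
          ThreadLocal.withInitial(() -> new SimpleDateFormat("yyyy-MM-dd"));

      public static void main(String[] args) throws ParseException {
        // Single-threaded use is fine either way; the difference only appears when
        // many threads parse through the same shared instance at once.
        System.out.println(SHARED.parse("1999-01-01"));
        System.out.println(PER_THREAD.get().parse("1999-01-01"));
      }
    }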
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-12333: tez_union_with_udf.q added to wrong section in testconfiguration.properties (Jason Dere, reviewed by Chinna Lalam)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0a905624
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0a905624
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0a905624

    Branch: refs/heads/master-fixed
    Commit: 0a905624f1712120db8b41586248201002d5a544
    Parents: 37f05f4
    Author: Jason Dere <jdere@hortonworks.com>
    Authored: Wed Nov 4 17:14:34 2015 -0800
    Committer: Jason Dere <jdere@hortonworks.com>
    Committed: Wed Nov 4 17:14:34 2015 -0800

    ----------------------------------------------------------------------
      itests/src/test/resources/testconfiguration.properties | 2 +-
      1 file changed, 1 insertion(+), 1 deletion(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/0a905624/itests/src/test/resources/testconfiguration.properties
    ----------------------------------------------------------------------
    diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
    index 2d1d274..d16c318 100644
    --- a/itests/src/test/resources/testconfiguration.properties
    +++ b/itests/src/test/resources/testconfiguration.properties
    @@ -379,6 +379,7 @@ minitez.query.files=bucket_map_join_tez1.q,\
        tez_union2.q,\
        tez_union_dynamic_partition.q,\
        tez_union_view.q,\
    + tez_union_with_udf.q,\
        tez_union_decimal.q,\
        tez_union_group_by.q,\
        tez_smb_main.q,\
    @@ -424,7 +425,6 @@ minillap.query.files=bucket_map_join_tez1.q,\
        tez_union_view.q,\
        tez_union_decimal.q,\
        tez_union_group_by.q,\
    - tez_union_with_udf.q,\
        tez_smb_main.q,\
        tez_smb_1.q,\
        vectorized_dynamic_partition_pruning.q,\
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-11634: Support partition pruning for IN(STRUCT(partcol, nonpartcol..)...) (Hari Subramaniyan, reviewed by Laljo John Pullokkaran)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c9246f44
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c9246f44
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c9246f44

    Branch: refs/heads/master-fixed
    Commit: c9246f44ead401b9121c3badbfbdb07cc9227a0a
    Parents: 55a24f0
    Author: Hari Subramaniyan <harisankar@apache.org>
    Authored: Mon Nov 2 11:34:49 2015 -0800
    Committer: Hari Subramaniyan <harisankar@apache.org>
    Committed: Mon Nov 2 11:34:49 2015 -0800

    ----------------------------------------------------------------------
      .../org/apache/hadoop/hive/conf/HiveConf.java | 4 +-
      .../apache/hadoop/hive/conf/HiveConf.java.orig | 5 +-
      .../hadoop/hive/ql/optimizer/Optimizer.java | 10 +-
      .../ql/optimizer/PartitionColumnsSeparator.java | 525 ++++
      .../hive/ql/optimizer/PointLookupOptimizer.java | 90 +-
      .../ql/optimizer/pcr/PcrExprProcFactory.java | 33 +
      .../hive/ql/optimizer/ppr/OpProcFactory.java | 3 +-
      .../apache/hadoop/hive/ql/plan/FilterDesc.java | 9 -
      ql/src/test/queries/clientpositive/pcs.q | 66 +
      .../test/queries/clientpositive/pointlookup.q | 6 +-
      .../test/queries/clientpositive/pointlookup2.q | 2 +-
      .../test/queries/clientpositive/pointlookup3.q | 2 +-
      .../dynpart_sort_optimization_acid.q.out | 4 +-
      .../llap/dynamic_partition_pruning.q.out | 45 -
      .../vectorized_dynamic_partition_pruning.q.out | 45 -
      ql/src/test/results/clientpositive/pcs.q.out | 2249 ++++++++++++++++++
      .../results/clientpositive/pointlookup.q.out | 8 +-
      .../tez/dynamic_partition_pruning.q.out | 45 -
      .../vectorized_dynamic_partition_pruning.q.out | 45 -
      19 files changed, 2896 insertions(+), 300 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    ----------------------------------------------------------------------
    diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    index b214344..5198bb5 100644
    --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    @@ -1263,8 +1263,8 @@ public class HiveConf extends Configuration {
               "Whether to transform OR clauses in Filter operators into IN clauses"),
          HIVEPOINTLOOKUPOPTIMIZERMIN("hive.optimize.point.lookup.min", 31,
                   "Minimum number of OR clauses needed to transform into IN clauses"),
    - HIVEPOINTLOOKUPOPTIMIZEREXTRACT("hive.optimize.point.lookup.extract", true,
    - "Extract partial expressions when optimizing point lookup IN clauses"),
    + HIVEPARTITIONCOLUMNSEPARATOR("hive.optimize.partition.columns.separate", true,
    + "Extract partition columns from IN clauses"),
          // Constant propagation optimizer
          HIVEOPTCONSTANTPROPAGATION("hive.optimize.constant.propagation", true, "Whether to enable constant propagation optimizer"),
          HIVEIDENTITYPROJECTREMOVER("hive.optimize.remove.identity.project", true, "Removes identity project from operator tree"),

    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
    ----------------------------------------------------------------------
    diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
    index f05f224..b214344 100644
    --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
    +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig
    @@ -2206,7 +2206,10 @@ public class HiveConf extends Configuration {
              "Exceeding this will trigger a flush irrelevant of memory pressure condition."),
          HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT("hive.vectorized.groupby.flush.percent", (float) 0.1,
              "Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded."),
    -
    + HIVE_VECTORIZATION_REDUCESINK_NEW_ENABLED("hive.vectorized.execution.reducesink.new.enabled", true,
    + "This flag should be set to true to enable the new vectorization\n" +
    + "of queries using ReduceSink.\ni" +
    + "The default value is true."),
          HIVE_TYPE_CHECK_ON_INSERT("hive.typecheck.on.insert", true, "This property has been extended to control "
              + "whether to check, convert, and normalize partition value to conform to its column type in "
              + "partition operations including but not limited to insert, such as alter, describe etc."),

    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
    index 7ee5081..6347872 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
    @@ -84,11 +84,11 @@ public class Optimizer {
          if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZER)) {
            final int min = HiveConf.getIntVar(hiveConf,
                HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZERMIN);
    - final boolean extract = HiveConf.getBoolVar(hiveConf,
    - HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZEREXTRACT);
    - final boolean testMode = HiveConf.getBoolVar(hiveConf,
    - HiveConf.ConfVars.HIVE_IN_TEST);
    - transformations.add(new PointLookupOptimizer(min, extract, testMode));
    + transformations.add(new PointLookupOptimizer(min));
    + }
    +
    + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEPARTITIONCOLUMNSEPARATOR)) {
    + transformations.add(new PartitionColumnsSeparator());
          }

          if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD)) {

    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java
    new file mode 100644
    index 0000000..f71f37c
    --- /dev/null
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java
    @@ -0,0 +1,525 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.hadoop.hive.ql.optimizer;
    +
    +import java.util.ArrayList;
    +import java.util.HashMap;
    +import java.util.HashSet;
    +import java.util.IdentityHashMap;
    +import java.util.LinkedHashMap;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.Map.Entry;
    +import java.util.Set;
    +import java.util.Stack;
    +
    +import org.apache.commons.logging.Log;
    +import org.apache.commons.logging.LogFactory;
    +import org.apache.hadoop.hive.ql.exec.Description;
    +import org.apache.hadoop.hive.ql.exec.FilterOperator;
    +import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
    +import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
    +import org.apache.hadoop.hive.ql.lib.Dispatcher;
    +import org.apache.hadoop.hive.ql.lib.ForwardWalker;
    +import org.apache.hadoop.hive.ql.lib.GraphWalker;
    +import org.apache.hadoop.hive.ql.lib.Node;
    +import org.apache.hadoop.hive.ql.lib.NodeProcessor;
    +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
    +import org.apache.hadoop.hive.ql.lib.PreOrderOnceWalker;
    +import org.apache.hadoop.hive.ql.lib.Rule;
    +import org.apache.hadoop.hive.ql.lib.RuleRegExp;
    +import org.apache.hadoop.hive.ql.lib.TypeRule;
    +import org.apache.hadoop.hive.ql.parse.ParseContext;
    +import org.apache.hadoop.hive.ql.parse.SemanticException;
    +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
    +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
    +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
    +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
    +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
    +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
    +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFStruct;
    +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
    +
    +/**
    + * This optimization will take a Filter expression, and if its predicate contains
    + * an IN operator whose children are constant structs or structs containing constant fields,
    + * it will try to generate predicate with IN clauses containing only partition columns.
    + * This predicate is in turn used by the partition pruner to prune the columns that are not
    + * part of the original IN(STRUCT(..)..) predicate.
    + */
    +public class PartitionColumnsSeparator implements Transform {
    +
    + private static final Log LOG = LogFactory.getLog(PointLookupOptimizer.class);
    + private static final String IN_UDF =
    + GenericUDFIn.class.getAnnotation(Description.class).name();
    + private static final String STRUCT_UDF =
    + GenericUDFStruct.class.getAnnotation(Description.class).name();
    + private static final String AND_UDF =
    + GenericUDFOPAnd.class.getAnnotation(Description.class).name();
    +
    + @Override
    + public ParseContext transform(ParseContext pctx) throws SemanticException {
    + // 1. Trigger transformation
    + Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
    + opRules.put(new RuleRegExp("R1", FilterOperator.getOperatorName() + "%"), new StructInTransformer());
    +
    + Dispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
    + GraphWalker ogw = new ForwardWalker(disp);
    +
    + List<Node> topNodes = new ArrayList<Node>();
    + topNodes.addAll(pctx.getTopOps().values());
    + ogw.startWalking(topNodes, null);
    + return pctx;
    + }
    +
    + private class StructInTransformer implements NodeProcessor {
    +
    + @Override
    + public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
    + Object... nodeOutputs) throws SemanticException {
    + FilterOperator filterOp = (FilterOperator) nd;
    + ExprNodeDesc predicate = filterOp.getConf().getPredicate();
    +
    + // Generate the list bucketing pruning predicate as 2 separate IN clauses
    + // containing the partitioning and non-partitioning columns.
    + ExprNodeDesc newPredicate = generateInClauses(predicate);
    + if (newPredicate != null) {
    + // Replace filter in current FIL with new FIL
    + if (LOG.isDebugEnabled()) {
    + LOG.debug("Generated new predicate with IN clause: " + newPredicate);
    + }
    + final List<ExprNodeDesc> subExpr =
    + new ArrayList<ExprNodeDesc>(2);
    + subExpr.add(predicate);
    + subExpr.add(newPredicate);
    + ExprNodeGenericFuncDesc newFilterPredicate = new ExprNodeGenericFuncDesc(
    + TypeInfoFactory.booleanTypeInfo,
    + FunctionRegistry.getFunctionInfo(AND_UDF).getGenericUDF(), subExpr);
    + filterOp.getConf().setPredicate(newFilterPredicate);
    + }
    +
    + return null;
    + }
    +
    + private ExprNodeDesc generateInClauses(ExprNodeDesc predicate) throws SemanticException {
    + Map<Rule, NodeProcessor> exprRules = new LinkedHashMap<Rule, NodeProcessor>();
    + exprRules.put(new TypeRule(ExprNodeGenericFuncDesc.class), new StructInExprProcessor());
    +
    + // The dispatcher fires the processor corresponding to the closest matching
    + // rule and passes the context along
    + Dispatcher disp = new DefaultRuleDispatcher(null, exprRules, null);
    + GraphWalker egw = new PreOrderOnceWalker(disp);
    +
    + List<Node> startNodes = new ArrayList<Node>();
    + startNodes.add(predicate);
    +
    + HashMap<Node, Object> outputMap = new HashMap<Node, Object>();
    + egw.startWalking(startNodes, outputMap);
    + return (ExprNodeDesc) outputMap.get(predicate);
    + }
    + }
    +
    + /**
    + * The StructInExprProcessor processes the IN clauses of the following format :
    + * STRUCT(T1.a, T1.b, T2.b, T2.c) IN (STRUCT(1, 2, 3, 4) , STRUCT(2, 3, 4, 5))
    + * where T1.a, T1.b, T2.c are all partition columns and T2.b is a non-partition
    + * column. The resulting additional predicate generated after
    + * StructInExprProcessor.process() looks like :
    + * STRUCT(T1.a, T1.b) IN (STRUCT(1, 2), STRUCT(2, 3))
    + * AND
    + * STRUCT(T2.b) IN (STRUCT(4), STRUCT(5))
    + * The additional predicate generated is used to prune the partitions that are
    + * part of the given query. Once the partitions are pruned, the partition condition
    + * remover is expected to remove the redundant predicates from the plan.
    + */
    + private class StructInExprProcessor implements NodeProcessor {
    +
    + /** TableInfo is populated in PASS 1 of process(). It contains the information required
    + * to generate an IN clause of the following format:
    + * STRUCT(T1.a, T1.b) IN (const STRUCT(1, 2), const STRUCT(2, 3))
    + * In the above e.g. please note that all elements of the struct come from the same table.
    + * The populated TableStructInfo is used to generate the IN clause in PASS 2 of process().
    + * The table struct information class has the following fields:
    + * 1. Expression Node Descriptor for the Left Hand Side of the IN clause for the table
    + * 2. 2-D List of expression node descriptors which corresponds to the elements of IN clause
    + */
    + class TableInfo {
    + List<ExprNodeDesc> exprNodeLHSDescriptor;
    + List<List<ExprNodeDesc>> exprNodeRHSStructs;
    +
    + public TableInfo() {
    + exprNodeLHSDescriptor = new ArrayList<ExprNodeDesc>();
    + exprNodeRHSStructs = new ArrayList<List<ExprNodeDesc>>();
    + }
    + }
    +
    + // Mapping from expression node to is an expression containing only
    + // partition or virtual column or constants
    + private Map<ExprNodeDesc, Boolean> exprNodeToPartOrVirtualColOrConstExpr =
    + new IdentityHashMap<ExprNodeDesc, Boolean>();
    +
    + /**
    + * This function iterates through the entire subtree under a given expression node
    + * and makes sure that the expression contain only constant nodes or
    + * partition/virtual columns as leaf nodes.
    + * @param en Expression Node Descriptor for the root node.
    + * @return true if the subtree rooted under en has only partition/virtual columns or
    + * constant values as the leaf nodes. Else, return false.
    + */
    + private boolean exprContainsOnlyPartitionColOrVirtualColOrConstants(ExprNodeDesc en) {
    + if (en == null) {
    + return true;
    + }
    + if (exprNodeToPartOrVirtualColOrConstExpr.containsKey(en)) {
    + return exprNodeToPartOrVirtualColOrConstExpr.get(en);
    + }
    + if (en instanceof ExprNodeColumnDesc) {
    + boolean ret = ((ExprNodeColumnDesc)en).getIsPartitionColOrVirtualCol();
    + exprNodeToPartOrVirtualColOrConstExpr.put(en, ret);
    + return ret;
    + }
    + if (en.getChildren() != null) {
    + for (ExprNodeDesc cn : en.getChildren()) {
    + if (!exprContainsOnlyPartitionColOrVirtualColOrConstants(cn)) {
    + exprNodeToPartOrVirtualColOrConstExpr.put(en, false);
    + return false;
    + }
    + }
    + }
    + exprNodeToPartOrVirtualColOrConstExpr.put(en, true);
    + return true;
    + }
    +
    +
    + /**
    + * Check if the expression node satisfies the following :
    + * Has atleast one subexpression containing a partition/virtualcolumn and has
    + * exactly refer to a single table alias.
    + * @param en Expression Node Descriptor
    + * @return true if there is atleast one subexpression with partition/virtual column
    + * and has exactly refer to a single table alias. If not, return false.
    + */
    + private boolean hasAtleastOneSubExprWithPartColOrVirtualColWithOneTableAlias(ExprNodeDesc en) {
    + if (en == null || en.getChildren() == null) {
    + return false;
    + }
    + for (ExprNodeDesc cn : en.getChildren()) {
    + if (exprContainsOnlyPartitionColOrVirtualColOrConstants(cn) && getTableAlias(cn) != null) {
    + return true;
    + }
    + }
    + return false;
    + }
    +
    +
    + /**
    + * Check if the expression node satisfies the following :
    + * Has all subexpressions containing constants or a partition/virtual column/coming from the
    + * same table
    + * @param en Expression Node Descriptor
    + * @return true/false based on the condition specified in the above description.
    + */
    + private boolean hasAllSubExprWithConstOrPartColOrVirtualColWithOneTableAlias(ExprNodeDesc en) {
    + if (!exprContainsOnlyPartitionColOrVirtualColOrConstants(en)) {
    + return false;
    + }
    +
    + Set<String> s = new HashSet<String>();
    + Set<ExprNodeDesc> visited = new HashSet<ExprNodeDesc>();
    +
    + return getTableAliasHelper(en, s, visited);
    + }
    +
    +
    + /**
    + * Return the expression node descriptor if the input expression node is a GenericUDFIn.
    + * Else, return null.
    + * @param en Expression Node Descriptor
    + * @return The expression node descriptor if the input expression node represents an IN clause.
    + * Else, return null.
    + */
    + private ExprNodeGenericFuncDesc getInExprNode(ExprNodeDesc en) {
    + if (en == null) {
    + return null;
    + }
    +
    + if (en instanceof ExprNodeGenericFuncDesc && ((ExprNodeGenericFuncDesc)(en)).getGenericUDF()
    + instanceof GenericUDFIn) {
    + return (ExprNodeGenericFuncDesc) en;
    + }
    + return null;
    + }
    +
    +
    + /**
    + * Helper used by getTableAlias
    + * @param en Expression Node Descriptor
    + * @param s Set of the table Aliases associated with the current Expression node.
    + * @param visited Visited ExpressionNode set.
    + * @return true if en has at most one table associated with it, else return false.
    + */
    + private boolean getTableAliasHelper(ExprNodeDesc en, Set<String> s, Set<ExprNodeDesc> visited) {
    + visited.add(en);
    +
    + // The current expression node is a column, see if the column alias is already a part of
    + // the return set, s. If not and we already have an entry in set s, this is an invalid expression
    + // and return false.
    + if (en instanceof ExprNodeColumnDesc) {
    + if (s.size() > 0 &&
    + !s.contains(((ExprNodeColumnDesc)en).getTabAlias())) {
    + return false;
    + }
    + if (s.size() == 0) {
    + s.add(((ExprNodeColumnDesc)en).getTabAlias());
    + }
    + return true;
    + }
    + if (en.getChildren() == null) {
    + return true;
    + }
    +
    + // Iterative through the children in a DFS manner to see if there is more than 1 table alias
    + // referenced by the current expression node.
    + for (ExprNodeDesc cn : en.getChildren()) {
    + if (visited.contains(cn)) {
    + continue;
    + }
    + if (cn instanceof ExprNodeColumnDesc) {
    + s.add(((ExprNodeColumnDesc) cn).getTabAlias());
    + } else if (!(cn instanceof ExprNodeConstantDesc)) {
    + if (!getTableAliasHelper(cn, s, visited)) {
    + return false;
    + }
    + }
    + }
    + return true;
    + }
    +
    +
    + /**
    + * If the given expression has just a single table associated with it,
    + * return the table alias associated with it. Else, return null.
    + * @param en
    + * @return The table alias associated with the expression if there is a single table
    + * reference. Else, return null.
    + */
    + private String getTableAlias(ExprNodeDesc en) {
    + Set<String> s = new HashSet<String>();
    + Set<ExprNodeDesc> visited = new HashSet<ExprNodeDesc>();
    + boolean singleTableAlias = getTableAliasHelper(en, s, visited);
    +
    + if (!singleTableAlias || s.size() == 0) {
    + return null;
    + }
    + StringBuilder ans = new StringBuilder();
    + for (String st : s) {
    + ans.append(st);
    + }
    + return ans.toString();
    + }
    +
    +
    + /**
    + * The main process method for StructInExprProcessor to generate additional predicates
    + * containing only partition columns.
    + */
    + @Override
    + public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
    + Object... nodeOutputs) throws SemanticException {
    + ExprNodeGenericFuncDesc fd = getInExprNode((ExprNodeDesc)nd);
    +
    + /***************************************************************************************\
    + BEGIN : Early terminations for Partition Column Separator
    + /***************************************************************************************/
    + // 1. If the input node is not an IN operator, we bail out.
    + if (fd == null) {
    + if (LOG.isDebugEnabled()) {
    + LOG.debug("Partition columns not separated for " + fd + ", is not IN operator : ");
    + }
    + return null;
    + }
    +
    + // 2. Check if the input is an IN operator with struct children
    + List<ExprNodeDesc> children = fd.getChildren();
    + if (!(children.get(0) instanceof ExprNodeGenericFuncDesc) ||
    + (!(((ExprNodeGenericFuncDesc) children.get(0)).getGenericUDF()
    + instanceof GenericUDFStruct))) {
    + if (LOG.isDebugEnabled()) {
    + LOG.debug("Partition columns not separated for " + fd + ", children size " +
    + children.size() + ", child expression : " + children.get(0).getExprString());
    + }
    + return null;
    + }
    +
    + // 3. See if the IN (STRUCT(EXP1, EXP2,..) has atleast one expression with partition
    + // column with single table alias. If not bail out.
    + // We might have expressions containing only partitioning columns, say, T1.A + T2.B
    + // where T1.A and T2.B are both partitioning columns.
    + // However, these expressions should not be considered as valid expressions for separation.
    + if (!hasAtleastOneSubExprWithPartColOrVirtualColWithOneTableAlias(children.get(0))) {
    + if (LOG.isDebugEnabled()) {
    + LOG.debug("Partition columns not separated for " + fd +
    + ", there are no expression containing partition columns in struct fields");
    + }
    + return null;
    + }
    +
    + // 4. See if all the field expressions of the left hand side of IN are expressions
    + // containing constants or only partition columns coming from same table.
    + // If so, we need not perform this optimization and we should bail out.
    + if (hasAllSubExprWithConstOrPartColOrVirtualColWithOneTableAlias(children.get(0))) {
    + if (LOG.isDebugEnabled()) {
    + LOG.debug("Partition columns not separated for " + fd +
    + ", all fields are expressions containing constants or only partition columns"
    + + " coming from the same table");
    + }
    + return null;
    + }
    +
    + /***************************************************************************************\
    + END : Early terminations for Partition Column Separator
    + /***************************************************************************************/
    +
    +
    + /***************************************************************************************\
    + BEGIN : Actual processing of the IN (STRUCT(..)) expression.
    + /***************************************************************************************/
    + Map<String, TableInfo> tableAliasToInfo =
    + new HashMap<>();
    + ExprNodeGenericFuncDesc originalStructDesc = ((ExprNodeGenericFuncDesc) children.get(0));
    + List<ExprNodeDesc> originalDescChildren = originalStructDesc.getChildren();
    + /**
    + * PASS 1 : Iterate through the original IN(STRUCT(..)) and populate the tableAlias to
    + * predicate information inside tableAliasToInfo.
    + */
    + for (int i = 0; i < originalDescChildren.size(); i++) {
    + ExprNodeDesc en = originalDescChildren.get(i);
    + String tabAlias = null;
    +
    + // If the current expression node does not have a virtual/partition column or
    + // single table alias reference, ignore it and move to the next expression node.
    + if (!exprContainsOnlyPartitionColOrVirtualColOrConstants(en) ||
    + (tabAlias = getTableAlias(en)) == null) {
    + continue;
    + }
    +
    + TableInfo currTableInfo = null;
    +
    + // If the table alias to information map already contains the current table,
    + // use the existing TableInfo object. Else, create a new one.
    + if (tableAliasToInfo.containsKey(tabAlias)) {
    + currTableInfo = tableAliasToInfo.get(tabAlias);
    + } else {
    + currTableInfo = new TableInfo();
    + }
    + currTableInfo.exprNodeLHSDescriptor.add(en);
    +
    + // Iterate through the children nodes of the IN clauses starting from index 1,
    + // which corresponds to the right hand side of the IN list.
    + // Insert the value corresponding to the current expression into currTableInfo.exprNodeRHSStructs.
    + for (int j = 1; j < children.size(); j++) {
    + ExprNodeDesc currChildStructExpr = children.get(j);
    + ExprNodeDesc newConstStructElement = null;
    +
    + // 1. Get the constant value associated with the current element in the struct.
    + // If the current child struct expression is a constant struct.
    + if (currChildStructExpr instanceof ExprNodeConstantDesc) {
    + List<Object> cnCols = (List<Object>)(((ExprNodeConstantDesc) (children.get(j))).getValue());
    + newConstStructElement = new ExprNodeConstantDesc(cnCols.get(i));
    + } else {
    + // This better be a generic struct with constant values as the children.
    + List<ExprNodeDesc> cnChildren = ((ExprNodeGenericFuncDesc) children.get(j)).getChildren();
    + newConstStructElement = new ExprNodeConstantDesc(
    + (((ExprNodeConstantDesc) (cnChildren.get(i))).getValue()));
    + }
    +
    + // 2. Insert the current constant value into exprNodeStructs list.
    + // If there is no struct corresponding to the current element, create a new one, insert
    + // the constant value into it and add the struct as part of exprNodeStructs.
    + if (currTableInfo.exprNodeRHSStructs.size() < j) {
    + List<ExprNodeDesc> newConstStructList = new ArrayList<ExprNodeDesc>();
    + newConstStructList.add(newConstStructElement);
    + currTableInfo.exprNodeRHSStructs.add(newConstStructList);
    + } else {
    + // We already have a struct node for the current index. Insert the constant value
    + // into the corresponding struct node.
    + currTableInfo.exprNodeRHSStructs.get(j-1).add(newConstStructElement);
    + }
    + }
    +
    + // Insert the current table alias entry into the map if not already present in tableAliasToInfo.
    + if (!tableAliasToInfo.containsKey(tabAlias)) {
    + tableAliasToInfo.put(tabAlias, currTableInfo);
    + }
    + }
    +
    + /**
    + * PASS 2 : Iterate through the tableAliasToInfo populated via PASS 1
    + * to generate the new expression.
    + */
    + // subExpr is the list containing generated IN clauses as a result of this optimization.
    + final List<ExprNodeDesc> subExpr =
    + new ArrayList<ExprNodeDesc>(originalDescChildren.size()+1);
    +
    + for (Entry<String, TableInfo> entry :
    + tableAliasToInfo.entrySet()) {
    + TableInfo currTableInfo = entry.getValue();
    + List<List<ExprNodeDesc>> currConstStructList = currTableInfo.exprNodeRHSStructs;
    +
    + // IN(STRUCT(..)..) ExprNodeDesc list for the current table alias.
    + List<ExprNodeDesc> currInStructExprList = new ArrayList<ExprNodeDesc>();
    +
    + // Add the left hand side of the IN clause which contains the struct definition.
    + currInStructExprList.add(ExprNodeGenericFuncDesc.newInstance
    + (FunctionRegistry.getFunctionInfo(STRUCT_UDF).getGenericUDF(),
    + STRUCT_UDF,
    + currTableInfo.exprNodeLHSDescriptor));
    +
    + // Generate the right hand side of the IN clause
    + for (int i = 0; i < currConstStructList.size(); i++) {
    + List<ExprNodeDesc> currConstStruct = currConstStructList.get(i);
    +
    + // Add the current constant struct to the right hand side of the IN clause.
    + currInStructExprList.add(ExprNodeGenericFuncDesc.newInstance
    + (FunctionRegistry.getFunctionInfo(STRUCT_UDF).getGenericUDF(),
    + STRUCT_UDF,
    + currConstStruct));
    + }
    +
    + // Add the newly generated IN clause to subExpr.
    + subExpr.add(new ExprNodeGenericFuncDesc(
    + TypeInfoFactory.booleanTypeInfo, FunctionRegistry.
    + getFunctionInfo(IN_UDF).getGenericUDF(), currInStructExprList));
    + }
    + /***************************************************************************************\
    + END : Actual processing of the IN (STRUCT(..)) expression.
    + /***************************************************************************************/
    +
    + // If there is only 1 table ALIAS, return it
    + if (subExpr.size() == 1) {
    + // Return the new expression containing only partition columns
    + return subExpr.get(0);
    + }
    + // Return the new expression containing only partition columns
    + // after concatenating them with AND operator
    + return new ExprNodeGenericFuncDesc(
    + TypeInfoFactory.booleanTypeInfo,
    + FunctionRegistry.getFunctionInfo(AND_UDF).getGenericUDF(), subExpr);
    + }
    + }
    +}
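
    The PartitionColumnsSeparator processor above groups the fields of an IN (STRUCT(...)) predicate by table alias (PASS 1) and then emits one partition-column-only IN clause per alias (PASS 2). As a rough illustration, using the two-alias join from the pcs.q test added below (the exact expression tree Hive builds may differ):

      -- original predicate spanning aliases a and b
      where struct(a.ds, a.key, b.ds) in (struct('2000-04-08', 1, '2000-04-09'),
                                          struct('2000-04-09', 2, '2000-04-08'))
      -- derived partition-only predicates; a.key is skipped because key is not a partition column
      and struct(a.ds) in (struct('2000-04-08'), struct('2000-04-09'))
      and struct(b.ds) in (struct('2000-04-09'), struct('2000-04-08'))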

    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
    index 4799b4d..a1a49cd 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
    @@ -18,14 +18,10 @@
      package org.apache.hadoop.hive.ql.optimizer;

      import java.util.ArrayList;
    -import java.util.Collection;
    -import java.util.Comparator;
      import java.util.HashMap;
    -import java.util.HashSet;
      import java.util.LinkedHashMap;
      import java.util.List;
      import java.util.Map;
    -import java.util.Set;
      import java.util.Stack;

      import org.apache.calcite.util.Pair;
    @@ -50,18 +46,15 @@ import org.apache.hadoop.hive.ql.parse.SemanticException;
      import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
      import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
      import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
    -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc.ExprNodeDescEqualityWrapper;
      import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
      import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
      import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
    -import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
      import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
      import org.apache.hadoop.hive.ql.udf.generic.GenericUDFStruct;
      import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
      import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

      import com.google.common.collect.ArrayListMultimap;
    -import com.google.common.collect.ImmutableSortedSet;
      import com.google.common.collect.ListMultimap;

      /**
    @@ -78,48 +71,14 @@ public class PointLookupOptimizer implements Transform {
                GenericUDFIn.class.getAnnotation(Description.class).name();
        private static final String STRUCT_UDF =
                GenericUDFStruct.class.getAnnotation(Description.class).name();
    - private static final String AND_UDF =
    - GenericUDFOPAnd.class.getAnnotation(Description.class).name();
    -
        // these are closure-bound for all the walkers in context
        public final int minOrExpr;
    - public final boolean extract;
    - public final boolean testMode;

        /*
         * Pass in configs and pre-create a parse context
         */
    - public PointLookupOptimizer(final int min, final boolean extract, final boolean testMode) {
    + public PointLookupOptimizer(final int min) {
          this.minOrExpr = min;
    - this.extract = extract;
    - this.testMode = testMode;
    - }
    -
    - // Hash Set iteration isn't ordered, but force string sorted order
    - // to get a consistent test run.
    - private Collection<ExprNodeDescEqualityWrapper> sortForTests(
    - Set<ExprNodeDescEqualityWrapper> valuesExpr) {
    - if (!testMode) {
    - // normal case - sorting is wasted for an IN()
    - return valuesExpr;
    - }
    - final Collection<ExprNodeDescEqualityWrapper> sortedValues;
    -
    - sortedValues = ImmutableSortedSet.copyOf(
    - new Comparator<ExprNodeDescEqualityWrapper>() {
    - @Override
    - public int compare(ExprNodeDescEqualityWrapper w1,
    - ExprNodeDescEqualityWrapper w2) {
    - // fail if you find nulls (this is a test-code section)
    - if (w1.equals(w2)) {
    - return 0;
    - }
    - return w1.getExprNodeDesc().getExprString()
    - .compareTo(w2.getExprNodeDesc().getExprString());
    - }
    - }, valuesExpr);
    -
    - return sortedValues;
        }

        @Override
    @@ -152,9 +111,6 @@ public class PointLookupOptimizer implements Transform {
              if (LOG.isDebugEnabled()) {
                LOG.debug("Generated new predicate with IN clause: " + newPredicate);
              }
    - if (!extract) {
    - filterOp.getConf().setOrigPredicate(predicate);
    - }
              filterOp.getConf().setPredicate(newPredicate);
            }

    @@ -326,50 +282,6 @@ public class PointLookupOptimizer implements Transform {
            newPredicate = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
                    FunctionRegistry.getFunctionInfo(IN_UDF).getGenericUDF(), newChildren);

    - if (extract && columns.size() > 1) {
    - final List<ExprNodeDesc> subExpr = new ArrayList<ExprNodeDesc>(columns.size()+1);
    -
    - // extract pre-conditions for the tuple expressions
    - // (a,b) IN ((1,2),(2,3)) ->
    - // ((a) IN (1,2) and b in (2,3)) and (a,b) IN ((1,2),(2,3))
    -
    - for (String keyString : columnConstantsMap.keySet()) {
    - final Set<ExprNodeDescEqualityWrapper> valuesExpr =
    - new HashSet<ExprNodeDescEqualityWrapper>(children.size());
    - final List<Pair<ExprNodeColumnDesc, ExprNodeConstantDesc>> partial =
    - columnConstantsMap.get(keyString);
    - for (int i = 0; i < children.size(); i++) {
    - Pair<ExprNodeColumnDesc, ExprNodeConstantDesc> columnConstant = partial
    - .get(i);
    - valuesExpr
    - .add(new ExprNodeDescEqualityWrapper(columnConstant.right));
    - }
    - ExprNodeColumnDesc lookupCol = partial.get(0).left;
    - // generate a partial IN clause, if the column is a partition column
    - if (lookupCol.getIsPartitionColOrVirtualCol()
    - || valuesExpr.size() < children.size()) {
    - // optimize only nDV reductions
    - final List<ExprNodeDesc> inExpr = new ArrayList<ExprNodeDesc>();
    - inExpr.add(lookupCol);
    - for (ExprNodeDescEqualityWrapper value : sortForTests(valuesExpr)) {
    - inExpr.add(value.getExprNodeDesc());
    - }
    - subExpr.add(new ExprNodeGenericFuncDesc(
    - TypeInfoFactory.booleanTypeInfo, FunctionRegistry
    - .getFunctionInfo(IN_UDF).getGenericUDF(), inExpr));
    - }
    - }
    - // loop complete, inspect the sub expressions generated
    - if (subExpr.size() > 0) {
    - // add the newPredicate to the end & produce an AND clause
    - subExpr.add(newPredicate);
    - newPredicate = new ExprNodeGenericFuncDesc(
    - TypeInfoFactory.booleanTypeInfo, FunctionRegistry
    - .getFunctionInfo(AND_UDF).getGenericUDF(), subExpr);
    - }
    - // else, newPredicate is unmodified
    - }
    -
            return newPredicate;
          }
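
    With the extract and testMode paths removed, PointLookupOptimizer is now limited to collapsing equality ORs into a single IN clause once hive.optimize.point.lookup.min is met; the follow-on derivation of partition-column predicates is governed by the separate hive.optimize.partition.columns.separate flag exercised in the test changes below. A minimal sketch of the remaining rewrite, using pcs_t1 from pcs.q (the exact plan text may differ):

      set hive.optimize.point.lookup = true;
      set hive.optimize.point.lookup.min = 1;
      -- (ds = '2000-04-08' and key = 1) or (ds = '2000-04-09' and key = 2)
      -- becomes the single point-lookup predicate
      --   struct(ds, key) IN (struct('2000-04-08', 1), struct('2000-04-09', 2))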


    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
    index 7cdc730..2ab1575 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
    @@ -48,9 +48,12 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
      import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
      import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
      import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
    +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
    +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFStruct;
      import org.apache.hadoop.hive.serde2.SerDeException;
      import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
      import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
    +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
      import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

      /**
    @@ -364,6 +367,36 @@ public final class PcrExprProcFactory {
                return getResultWrapFromResults(results, fd, newNodeOutputs);
              }
              return new NodeInfoWrapper(WalkState.UNKNOWN, null, getOutExpr(fd, newNodeOutputs));
    + } else if (fd.getGenericUDF() instanceof GenericUDFIn) {
    + List<ExprNodeDesc> children = fd.getChildren();
    + boolean removePredElem = false;
    + ExprNodeDesc lhs = children.get(0);
    +
    + if (lhs instanceof ExprNodeGenericFuncDesc) {
    + // Make sure that the generic udf is deterministic
    + if (FunctionRegistry.isDeterministic(((ExprNodeGenericFuncDesc) lhs)
    + .getGenericUDF())) {
    + boolean hasOnlyPartCols = true;
    + for (ExprNodeDesc ed : ((ExprNodeGenericFuncDesc) lhs).getChildren()) {
    + // Check if the current field expression contains only
    + // partition columns or virtual columns.
    + // If yes, this filter predicate is a candidate for this optimization.
    + if (!(ed instanceof ExprNodeColumnDesc &&
    + ((ExprNodeColumnDesc)ed).getIsPartitionColOrVirtualCol())) {
    + hasOnlyPartCols = false;
    + break;
    + }
    + }
    + removePredElem = hasOnlyPartCols;
    + }
    + }
    +
    + // If removePredElem is set to true, return true as this is a potential candidate
    + // for partition condition remover. Else, set the WalkState for this node to unknown.
    + return removePredElem ?
    + new NodeInfoWrapper(WalkState.TRUE, null,
    + new ExprNodeConstantDesc(fd.getTypeInfo(), Boolean.TRUE)) :
    + new NodeInfoWrapper(WalkState.UNKNOWN, null, getOutExpr(fd, nodeOutputs)) ;
            } else if (!FunctionRegistry.isDeterministic(fd.getGenericUDF())) {
              // If it's a non-deterministic UDF, set unknown to true
              return new NodeInfoWrapper(WalkState.UNKNOWN, null,

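    The new GenericUDFIn branch lets the partition condition remover drop an IN (STRUCT(...)) predicate whose struct is a deterministic UDF over partition or virtual columns only: partition columns are constant within a partition, so once pruning restricts the scan to the partitions satisfying the IN list, the predicate is true for every remaining row and the node can be marked WalkState.TRUE. A hedged illustration against pcs_t1 from pcs.q (actual plan output may differ):

      -- struct(ds, key) in (...) stays in the Filter Operator, since key is not a partition column,
      -- but a partition-column-only predicate such as
      select ds from pcs_t1 where struct(ds) in (struct('2000-04-08'), struct('2000-04-09'));
      -- can be removed from the filter once the matching partitions have been selected
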
    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java
    index 7262164..fd51628 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java
    @@ -55,8 +55,7 @@ public final class OpProcFactory extends PrunerOperatorFactory {
              TableScanOperator top) throws SemanticException, UDFArgumentException {
            OpWalkerCtx owc = (OpWalkerCtx) procCtx;
            // Otherwise this is not a sampling predicate and we need to
    - ExprNodeDesc predicate = fop.getConf().getOrigPredicate();
    - predicate = predicate == null ? fop.getConf().getPredicate() : predicate;
    + ExprNodeDesc predicate = fop.getConf().getPredicate();
            String alias = top.getConf().getAlias();

            // Generate the partition pruning predicate

    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
    index 6a31689..ccc4bb4 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
    @@ -79,7 +79,6 @@ public class FilterDesc extends AbstractOperatorDesc {

        private static final long serialVersionUID = 1L;
        private org.apache.hadoop.hive.ql.plan.ExprNodeDesc predicate;
    - private transient ExprNodeDesc origPredicate;
        private boolean isSamplingPred;
        private transient SampleDesc sampleDescr;
        //Is this a filter that should perform a comparison for sorted searches
    @@ -151,14 +150,6 @@ public class FilterDesc extends AbstractOperatorDesc {
          this.isSortedFilter = isSortedFilter;
        }

    - public void setOrigPredicate(ExprNodeDesc origPredicate) {
    - this.origPredicate = origPredicate;
    - }
    -
    - public ExprNodeDesc getOrigPredicate() {
    - return origPredicate;
    - }
    -
        /**
         * Some filters are generated or implied, which means it is not in the query.
         * It is added by the analyzer. For example, when we do an inner join, we add

    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/queries/clientpositive/pcs.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/pcs.q b/ql/src/test/queries/clientpositive/pcs.q
    new file mode 100644
    index 0000000..4b35a4d
    --- /dev/null
    +++ b/ql/src/test/queries/clientpositive/pcs.q
    @@ -0,0 +1,66 @@
    +drop table pcs_t1;
    +drop table pcs_t2;
    +
    +create table pcs_t1 (key int, value string) partitioned by (ds string);
    +insert overwrite table pcs_t1 partition (ds='2000-04-08') select * from src where key < 20 order by key;
    +insert overwrite table pcs_t1 partition (ds='2000-04-09') select * from src where key < 20 order by key;
    +insert overwrite table pcs_t1 partition (ds='2000-04-10') select * from src where key < 20 order by key;
    +
    +analyze table pcs_t1 partition(ds) compute statistics;
    +analyze table pcs_t1 partition(ds) compute statistics for columns;
    +
    +set hive.optimize.point.lookup = true;
    +set hive.optimize.point.lookup.min = 1;
    +
    +explain extended select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds;
    +select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds;
    +
    +set hive.optimize.point.lookup = false;
    +set hive.optimize.partition.columns.separate=true;
    +set hive.optimize.ppd=true;
    +
    +explain extended select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2));
    +select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2));
    +
    +explain extended select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4));
    +select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4));
    +
    +explain extended select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'));
    +
    +select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'));
    +
    +explain extended select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2));
    +select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2));
    +
    +explain select lag(key) over (partition by key) as c1
    +from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2));
    +select lag(key) over (partition by key) as c1
    +from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2));
    +
    +EXPLAIN EXTENDED
    +SELECT * FROM (
    + SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    + UNION ALL
    + SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +) A
    +WHERE A.ds = '2008-04-08'
    +SORT BY A.key, A.value, A.ds;
    +
    +SELECT * FROM (
    + SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    + UNION ALL
    + SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
    +) A
    +WHERE A.ds = '2008-04-08'
    +SORT BY A.key, A.value, A.ds;
    +
    +explain extended select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11));
    +select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11));
    +
    +explain extended select ds from pcs_t1 where struct(ds, key, rand(100)) in (struct('2000-04-08',1,0.2), struct('2000-04-09',2,0.3));
    +
    +explain extended select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3));
    +select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3));
    +
    +explain extended select ds from pcs_t1 where key = 3 or (struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3)) and key+5 > 0);
    +select ds from pcs_t1 where key = 3 or (struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3)) and key+5 > 0);
    \ No newline at end of file

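    The queries above exercise both halves of the change: hive.optimize.point.lookup rewrites the OR form into IN (STRUCT(...)), and hive.optimize.partition.columns.separate then derives partition-only predicates from the struct fields. Only fields built purely from partition columns or constants contribute; a sketch of the expected effect for one query (the actual EXPLAIN EXTENDED output may differ):

      -- struct(ds, key+2) in (struct('2000-04-08', 3), struct('2000-04-09', 4))
      -- key+2 is not a partition-column expression and is skipped, so the derived
      -- pruning predicate is roughly struct(ds) in (struct('2000-04-08'), struct('2000-04-09'))
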
    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/queries/clientpositive/pointlookup.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/pointlookup.q b/ql/src/test/queries/clientpositive/pointlookup.q
    index 1aef2ef..c460f39 100644
    --- a/ql/src/test/queries/clientpositive/pointlookup.q
    +++ b/ql/src/test/queries/clientpositive/pointlookup.q
    @@ -18,8 +18,7 @@ WHERE


      set hive.optimize.point.lookup.min=3;
    -set hive.optimize.point.lookup.extract=false;
    -
    +set hive.optimize.partition.columns.separate=false;
      explain
      SELECT key
      FROM src
    @@ -38,8 +37,7 @@ WHERE
         AND value = '3'))
      ;

    -set hive.optimize.point.lookup.extract=true;
    -
    +set hive.optimize.partition.columns.separate=true;
      explain
      SELECT key
      FROM src

    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/queries/clientpositive/pointlookup2.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/pointlookup2.q b/ql/src/test/queries/clientpositive/pointlookup2.q
    index 31bebbb..94e99fb 100644
    --- a/ql/src/test/queries/clientpositive/pointlookup2.q
    +++ b/ql/src/test/queries/clientpositive/pointlookup2.q
    @@ -14,7 +14,7 @@ from pcr_t1
      insert overwrite table pcr_t2 select ds, key, value where ds='2000-04-08' and key=2;

      set hive.optimize.point.lookup.min=2;
    -set hive.optimize.point.lookup.extract=true;
    +set hive.optimize.partition.columns.separate=true;

      explain extended
      select key, value, ds

    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/queries/clientpositive/pointlookup3.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/pointlookup3.q b/ql/src/test/queries/clientpositive/pointlookup3.q
    index 3daa94b..79e7348 100644
    --- a/ql/src/test/queries/clientpositive/pointlookup3.q
    +++ b/ql/src/test/queries/clientpositive/pointlookup3.q
    @@ -6,7 +6,7 @@ insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') sel
      insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key;

      set hive.optimize.point.lookup.min=2;
    -set hive.optimize.point.lookup.extract=true;
    +set hive.optimize.partition.columns.separate=true;

      explain extended
      select key, value, ds1, ds2

    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
    index eca29df..ddb05e2 100644
    --- a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
    +++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
    @@ -153,7 +153,7 @@ STAGE PLANS:
                TableScan
                  alias: acid
                  Filter Operator
    - predicate: (key = 'foo') (type: boolean)
    + predicate: ((key = 'foo') and (ds) IN ('2008-04-08')) (type: boolean)
                    Select Operator
                      expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), ds (type: string)
                      outputColumnNames: _col0, _col3
    @@ -390,7 +390,7 @@ STAGE PLANS:
                TableScan
                  alias: acid
                  Filter Operator
    - predicate: (key = 'foo') (type: boolean)
    + predicate: ((key = 'foo') and (ds) IN ('2008-04-08')) (type: boolean)
                    Select Operator
                      expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), ds (type: string)
                      outputColumnNames: _col0, _col3

    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
    index 4320f01..7b428bc 100644
    --- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
    +++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
    @@ -1275,21 +1275,6 @@ STAGE PLANS:
                            sort order: +
                            Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                            Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator
    - keys: _col0 (type: double)
    - mode: hash
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Dynamic Partitioning Event Operator
    - Target Input: srcpart
    - Partition key expr: UDFToDouble(hr)
    - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Target column: hr
    - Target Vertex: Map 1
                  Execution mode: llap
              Reducer 2
                  Execution mode: llap
    @@ -4076,21 +4061,6 @@ STAGE PLANS:
                            sort order: +
                            Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                            Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator
    - keys: _col0 (type: double)
    - mode: hash
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Dynamic Partitioning Event Operator
    - Target Input: srcpart
    - Partition key expr: UDFToDouble(hr)
    - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
    - Target column: hr
    - Target Vertex: Map 1
                  Execution mode: llap
              Reducer 2
                  Execution mode: uber
    @@ -5229,21 +5199,6 @@ STAGE PLANS:
                                Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                                Target column: ds
                                Target Vertex: Map 1
    - Select Operator
    - expressions: UDFToDouble(hr) (type: double)
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator
    - keys: _col0 (type: double)
    - mode: hash
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
    - Dynamic Partitioning Event Operator
    - Target Input: srcpart_orc
    - Partition key expr: UDFToDouble(hr)
    - Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
    - Target column: hr
    - Target Vertex: Map 1
                  Execution mode: llap
              Reducer 2
                  Execution mode: uber

    http://git-wip-us.apache.org/repos/asf/hive/blob/c9246f44/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
    index e30465d..e9192a3 100644
    --- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
    +++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
    @@ -1275,21 +1275,6 @@ STAGE PLANS:
                            sort order: +
                            Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator
    - keys: _col0 (type: double)
    - mode: hash
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Dynamic Partitioning Event Operator
    - Target Input: srcpart
    - Partition key expr: UDFToDouble(hr)
    - Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Target column: hr
    - Target Vertex: Map 1
                  Execution mode: llap
              Reducer 2
                  Execution mode: llap
    @@ -4076,21 +4061,6 @@ STAGE PLANS:
                            sort order: +
                            Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
                            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator
    - keys: _col0 (type: double)
    - mode: hash
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Dynamic Partitioning Event Operator
    - Target Input: srcpart
    - Partition key expr: UDFToDouble(hr)
    - Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
    - Target column: hr
    - Target Vertex: Map 1
                  Execution mode: llap
              Reducer 2
                  Execution mode: vectorized, uber
    @@ -5229,21 +5199,6 @@ STAGE PLANS:
                                Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                                Target column: ds
                                Target Vertex: Map 1
    - Select Operator
    - expressions: UDFToDouble(hr) (type: double)
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator
    - keys: _col0 (type: double)
    - mode: hash
    - outputColumnNames: _col0
    - Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
    - Dynamic Partitioning Event Operator
    - Target Input: srcpart_orc
    - Partition key expr: UDFToDouble(hr)
    - Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
    - Target column: hr
    - Target Vertex: Map 1
                  Execution mode: llap
              Reducer 2
                  Execution mode: uber
  • Jxiang at Nov 6, 2015 at 5:32 pm
    Revert inadvertent addition of HiveConf.java.orig file


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/37f05f41
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/37f05f41
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/37f05f41

    Branch: refs/heads/master-fixed
    Commit: 37f05f410c5243ac6935feee267069cd246c9b38
    Parents: 11f5d44
    Author: Matt McCline <mmccline@hortonworks.com>
    Authored: Wed Nov 4 14:18:03 2015 -0800
    Committer: Matt McCline <mmccline@hortonworks.com>
    Committed: Wed Nov 4 14:18:03 2015 -0800

    ----------------------------------------------------------------------
      .../apache/hadoop/hive/conf/HiveConf.java.orig | 3372 ------------------
      1 file changed, 3372 deletions(-)
    ----------------------------------------------------------------------
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-11718 JDBC ResultSet.setFetchSize(0) returns no results (Aleksei Statkevich via Alan Gates)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/902a548e
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/902a548e
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/902a548e

    Branch: refs/heads/master-fixed
    Commit: 902a548ea5f52481436c2ef99753d8cd34c666dc
    Parents: de1fe68
    Author: Alan Gates <gates@hortonworks.com>
    Authored: Mon Nov 2 16:14:32 2015 -0800
    Committer: Alan Gates <gates@hortonworks.com>
    Committed: Mon Nov 2 16:14:32 2015 -0800

    ----------------------------------------------------------------------
      jdbc/pom.xml | 8 +++++
      .../org/apache/hive/jdbc/HiveStatement.java | 14 +++++++--
      .../org/apache/hive/jdbc/HiveStatementTest.java | 31 ++++++++++++++++++++
      3 files changed, 51 insertions(+), 2 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/902a548e/jdbc/pom.xml
    ----------------------------------------------------------------------
    diff --git a/jdbc/pom.xml b/jdbc/pom.xml
    index dadf9c3..ea961a4 100644
    --- a/jdbc/pom.xml
    +++ b/jdbc/pom.xml
    @@ -104,6 +104,13 @@
            <version>${hadoop.version}</version>
            <optional>true</optional>
          </dependency>
    + <!-- test inter-project -->
    + <dependency>
    + <groupId>junit</groupId>
    + <artifactId>junit</artifactId>
    + <version>${junit.version}</version>
    + <scope>test</scope>
    + </dependency>
        </dependencies>

        <profiles>
    @@ -117,6 +124,7 @@

        <build>
          <sourceDirectory>${basedir}/src/java</sourceDirectory>
    + <testSourceDirectory>${basedir}/src/test</testSourceDirectory>
          <resources>
            <resource>
              <directory>${basedir}/src/resources</directory>

    http://git-wip-us.apache.org/repos/asf/hive/blob/902a548e/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
    ----------------------------------------------------------------------
    diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
    index 25456af..180f99e8 100644
    --- a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
    +++ b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
    @@ -53,12 +53,13 @@ import org.slf4j.LoggerFactory;
       */
      public class HiveStatement implements java.sql.Statement {
        public static final Logger LOG = LoggerFactory.getLogger(HiveStatement.class.getName());
    + private static final int DEFAULT_FETCH_SIZE = 1000;
        private final HiveConnection connection;
        private TCLIService.Iface client;
        private TOperationHandle stmtHandle = null;
        private final TSessionHandle sessHandle;
        Map<String,String> sessConf = new HashMap<String,String>();
    - private int fetchSize = 1000;
    + private int fetchSize = DEFAULT_FETCH_SIZE;
        private boolean isScrollableResultset = false;
        /**
         * We need to keep a reference to the result set to support the following:
    @@ -673,7 +674,16 @@ public class HiveStatement implements java.sql.Statement {
        @Override
        public void setFetchSize(int rows) throws SQLException {
          checkConnection("setFetchSize");
    - fetchSize = rows;
    + if (rows > 0) {
    + fetchSize = rows;
    + } else if (rows == 0) {
    + // Javadoc for Statement interface states that if the value is zero
    + // then "fetch size" hint is ignored.
    + // In this case it means reverting it to the default value.
    + fetchSize = DEFAULT_FETCH_SIZE;
    + } else {
    + throw new SQLException("Fetch size must be greater than or equal to 0");
    + }
        }

        /*

    http://git-wip-us.apache.org/repos/asf/hive/blob/902a548e/jdbc/src/test/org/apache/hive/jdbc/HiveStatementTest.java
    ----------------------------------------------------------------------
    diff --git a/jdbc/src/test/org/apache/hive/jdbc/HiveStatementTest.java b/jdbc/src/test/org/apache/hive/jdbc/HiveStatementTest.java
    new file mode 100644
    index 0000000..be23b10
    --- /dev/null
    +++ b/jdbc/src/test/org/apache/hive/jdbc/HiveStatementTest.java
    @@ -0,0 +1,31 @@
    +package org.apache.hive.jdbc;
    +
    +import org.junit.Test;
    +
    +import java.sql.SQLException;
    +
    +import static org.junit.Assert.assertEquals;
    +
    +public class HiveStatementTest {
    +
    + @Test
    + public void testSetFetchSize1() throws SQLException {
    + HiveStatement stmt = new HiveStatement(null, null, null);
    + stmt.setFetchSize(123);
    + assertEquals(123, stmt.getFetchSize());
    + }
    +
    + @Test
    + public void testSetFetchSize2() throws SQLException {
    + HiveStatement stmt = new HiveStatement(null, null, null);
    + int initial = stmt.getFetchSize();
    + stmt.setFetchSize(0);
    + assertEquals(initial, stmt.getFetchSize());
    + }
    +
    + @Test(expected = SQLException.class)
    + public void testSetFetchSize3() throws SQLException {
    + HiveStatement stmt = new HiveStatement(null, null, null);
    + stmt.setFetchSize(-1);
    + }
    +}
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-12273: Improve user level explain (Pengcheng Xiong, reviewed by Ashutosh Chauhan, Laljo John Pullokkaran, Eugene Koifman, Prasanth Jayachandran and Wei Zheng)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6577f55c
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6577f55c
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6577f55c

    Branch: refs/heads/master-fixed
    Commit: 6577f55cd7f21568994638399f9c31bef578b5cc
    Parents: d5fdeed
    Author: Pengcheng Xiong <pxiong@apache.org>
    Authored: Tue Nov 3 10:11:18 2015 -0800
    Committer: Pengcheng Xiong <pxiong@apache.org>
    Committed: Tue Nov 3 10:11:18 2015 -0800

    ----------------------------------------------------------------------
      .../hive/common/jsonexplain/tez/Vertex.java | 9 +-
      .../apache/hadoop/hive/ql/plan/MapJoinDesc.java | 4 +-
      .../org/apache/hadoop/hive/ql/plan/MapWork.java | 2 +-
      .../apache/hadoop/hive/ql/plan/ReduceWork.java | 2 +-
      .../hadoop/hive/ql/plan/TableScanDesc.java | 7 +
      .../test/queries/clientpositive/explainuser_3.q | 46 +++-
      .../clientpositive/llap/constprog_dpp.q.out | 10 +-
      .../clientpositive/tez/explainuser_1.q.out | 16 ++
      .../clientpositive/tez/explainuser_2.q.out | 38 +++
      .../clientpositive/tez/explainuser_3.q.out | 230 ++++++++++++++++++-
      .../tez/vector_aggregate_without_gby.q.out | 4 +-
      .../tez/vector_auto_smb_mapjoin_14.q.out | 32 +--
      .../tez/vectorized_parquet_types.q.out | 2 +-
      13 files changed, 363 insertions(+), 39 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
    ----------------------------------------------------------------------
    diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
    index 67ff8eb..be01b8b 100644
    --- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
    +++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
    @@ -50,6 +50,8 @@ public final class Vertex implements Comparable<Vertex>{
        public final List<Vertex> mergeJoinDummyVertexs = new ArrayList<>();
        // whether this vertex has multiple reduce operators
        public boolean hasMultiReduceOp = false;
    + // execution mode
    + public String executionMode = "";

        public Vertex(String name, JSONObject vertexObject, TezJsonParser tezJsonParser) {
          super();
    @@ -103,6 +105,8 @@ public final class Vertex implements Comparable<Vertex>{
                } else {
                  throw new Exception("Merge File Operator does not have a Map Operator Tree");
                }
    + } else if (key.equals("Execution mode:")) {
    + executionMode = " " + vertexObject.getString(key);
              } else {
                throw new Exception("Unsupported operator tree in vertex " + this.name);
              }
    @@ -189,9 +193,10 @@ public final class Vertex implements Comparable<Vertex>{
          }
          parser.printSet.add(this);
          if (type != null) {
    - printer.println(TezJsonParser.prefixString(indentFlag, "|<-") + this.name + " [" + type + "]");
    + printer.println(TezJsonParser.prefixString(indentFlag, "|<-") + this.name + " [" + type + "]"
    + + this.executionMode);
          } else if (this.name != null) {
    - printer.println(TezJsonParser.prefixString(indentFlag) + this.name);
    + printer.println(TezJsonParser.prefixString(indentFlag) + this.name + this.executionMode);
          }
          // print operators
          if (hasMultiReduceOp && !callingVertex.union) {

    http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
    index e27b89b..4b93e7c 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
    @@ -334,7 +334,7 @@ public class MapJoinDesc extends JoinDesc implements Serializable {
          this.bigTablePartSpecToFileMapping = partToFileMapping;
        }

    - @Explain(displayName = "BucketMapJoin", explainLevels = { Level.EXTENDED }, displayOnlyOnTrue = true)
    + @Explain(displayName = "BucketMapJoin", explainLevels = { Level.USER, Level.EXTENDED }, displayOnlyOnTrue = true)
        public boolean isBucketMapJoin() {
          return isBucketMapJoin;
        }
    @@ -343,7 +343,7 @@ public class MapJoinDesc extends JoinDesc implements Serializable {
          this.isBucketMapJoin = isBucketMapJoin;
        }

    - @Explain(displayName = "HybridGraceHashJoin", displayOnlyOnTrue = true)
    + @Explain(displayName = "HybridGraceHashJoin", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }, displayOnlyOnTrue = true)
        public boolean isHybridHashJoin() {
          return isHybridHashJoin;
        }

    http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
    index 87c15a2..d349934 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
    @@ -340,7 +340,7 @@ public class MapWork extends BaseWork {
          }
        }

    - @Explain(displayName = "Execution mode")
    + @Explain(displayName = "Execution mode", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
        public String getExecutionMode() {
          if (vectorMode) {
            if (llapMode) {

    http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
    index 0222c23..8211346 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
    @@ -146,7 +146,7 @@ public class ReduceWork extends BaseWork {
          this.tagToValueDesc = tagToValueDesc;
        }

    - @Explain(displayName = "Execution mode")
    + @Explain(displayName = "Execution mode", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
        public String getExecutionMode() {
          if (vectorMode) {
            if (llapMode) {

    http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
    index 6661ce6..be7139c 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
    @@ -26,6 +26,8 @@ import java.util.Map;
      import org.apache.hadoop.hive.ql.exec.PTFUtils;
      import org.apache.hadoop.hive.ql.metadata.Table;
      import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
    +import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
    +import org.apache.hadoop.hive.ql.parse.SemanticException;
      import org.apache.hadoop.hive.ql.parse.TableSample;
      import org.apache.hadoop.hive.ql.plan.Explain.Level;

    @@ -135,6 +137,11 @@ public class TableScanDesc extends AbstractOperatorDesc {
          return alias;
        }

    + @Explain(displayName = "ACID table", explainLevels = { Level.USER }, displayOnlyOnTrue = true)
    + public boolean isAcidTable() {
    + return SemanticAnalyzer.isAcidTable(this.tableMetadata);
    + }
    +
        @Explain(displayName = "filterExpr")
        public String getFilterExprString() {
          StringBuilder sb = new StringBuilder();

    http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/queries/clientpositive/explainuser_3.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/explainuser_3.q b/ql/src/test/queries/clientpositive/explainuser_3.q
    index 16237bb..f604d38 100644
    --- a/ql/src/test/queries/clientpositive/explainuser_3.q
    +++ b/ql/src/test/queries/clientpositive/explainuser_3.q
    @@ -1,5 +1,15 @@
      set hive.explain.user=true;

    +set hive.support.concurrency=true;
    +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
    +set hive.enforce.bucketing=true;
    +set hive.exec.dynamic.partition.mode=nonstrict;
    +set hive.vectorized.execution.enabled=true;
    +
    +CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');
    +insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10;
    +explain select a, b from acid_vectorized order by a, b;
    +
      explain select key, value
      FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol;

    @@ -112,4 +122,38 @@ set hive.merge.mapredfiles=true;

      explain insert overwrite table orc_merge5 select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;

    -drop table orc_merge5;
    \ No newline at end of file
    +drop table orc_merge5;
    +
    +set hive.auto.convert.join=true;
    +set hive.auto.convert.join.noconditionaltask=true;
    +set hive.auto.convert.join.noconditionaltask.size=10000;
    +
    +CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
    +CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
    +CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
    +
    +load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
    +load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
    +
    +load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
    +load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
    +load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
    +load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
    +
    +set hive.enforce.bucketing=true;
    +set hive.enforce.sorting = true;
    +set hive.optimize.bucketingsorting=false;
    +insert overwrite table tab_part partition (ds='2008-04-08')
    +select key,value from srcbucket_mapjoin_part;
    +
    +CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
    +insert overwrite table tab partition (ds='2008-04-08')
    +select key,value from srcbucket_mapjoin;
    +
    +set hive.convert.join.bucket.mapjoin.tez = true;
    +explain
    +select a.key, a.value, b.value
    +from tab a join tab_part b on a.key = b.key;
    +
    +
    +

    http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out b/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out
    index 0bc964b..72a5d0d 100644
    --- a/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out
    +++ b/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out
    @@ -50,7 +50,7 @@ Stage-0
         Fetch Operator
            limit:-1
            Stage-1
    - Reducer 4
    + Reducer 4 llap
               File Output Operator [FS_16]
                  compressed:false
                  Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    @@ -60,7 +60,7 @@ Stage-0
    keys:{"0":"id (type: int)","1":"_col0 (type: int)"}
    outputColumnNames:["_col0"]
    Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    - |<-Map 6 [SIMPLE_EDGE]
    + |<-Map 6 [SIMPLE_EDGE] llap
    Reduce Output Operator [RS_12]
    key expressions:id (type: int)
    Map-reduce partition columns:id (type: int)
    @@ -69,7 +69,7 @@ Stage-0
    TableScan [TS_11]
    alias:a
    Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    - |<-Reducer 3 [SIMPLE_EDGE]
    + |<-Reducer 3 [SIMPLE_EDGE] llap
                     Reduce Output Operator [RS_13]
                        key expressions:_col0 (type: int)
                        Map-reduce partition columns:_col0 (type: int)
    @@ -82,7 +82,7 @@ Stage-0
    outputColumnNames:["_col0"]
    Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
    <-Union 2 [SIMPLE_EDGE]
    - |<-Map 1 [CONTAINS]
    + |<-Map 1 [CONTAINS] llap
    Reduce Output Operator [RS_8]
    sort order:
    Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
    @@ -96,7 +96,7 @@ Stage-0
    TableScan [TS_0]
    alias:tb2
    Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    - |<-Map 5 [CONTAINS]
    + |<-Map 5 [CONTAINS] llap
                                 Reduce Output Operator [RS_8]
                                    sort order:
                                    Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE

    http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
    index ec434f0..ee70033 100644
    --- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
    +++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
    @@ -4683,6 +4683,7 @@ Stage-0
                                          Statistics:Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
                                          Map Join Operator [MAPJOIN_25]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 1":"_col0 (type: string)","Map 4":"_col0 (type: string)"}
    outputColumnNames:["_col1"]
    Statistics:Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
    @@ -6418,6 +6419,7 @@ Stage-0
    value expressions:_col0 (type: string)
    Map Join Operator [MAPJOIN_28]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 1":"_col0 (type: string)","Map 3":"_col0 (type: string)"}
    outputColumnNames:["_col0","_col1"]
    Statistics:Num rows: 241 Data size: 42898 Basic stats: COMPLETE Column stats: COMPLETE
    @@ -6494,6 +6496,7 @@ Stage-0
    value expressions:_col0 (type: string)
    Map Join Operator [MAPJOIN_28]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 1":"_col0 (type: string)","Map 3":"_col0 (type: string)"}
    outputColumnNames:["_col0","_col1"]
    Statistics:Num rows: 241 Data size: 42898 Basic stats: COMPLETE Column stats: COMPLETE
    @@ -6570,6 +6573,7 @@ Stage-0
    value expressions:_col0 (type: string)
    Map Join Operator [MAPJOIN_28]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 1":"_col0 (type: string)","Map 3":"_col0 (type: string)"}
    outputColumnNames:["_col0","_col1"]
    Statistics:Num rows: 241 Data size: 42898 Basic stats: COMPLETE Column stats: COMPLETE
    @@ -6742,6 +6746,7 @@ Stage-0
                                       value expressions:_col5 (type: int)
                                       Map Join Operator [MAPJOIN_21]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 1":"p_partkey (type: int)","Map 4":"p_partkey (type: int)"}
    outputColumnNames:["_col1","_col2","_col5"]
    Statistics:Num rows: 29 Data size: 6467 Basic stats: COMPLETE Column stats: COMPLETE
    @@ -7014,6 +7019,7 @@ Stage-0
                  table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
                  Map Join Operator [MAPJOIN_16]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Reducer 2":"_col0 (type: int)","Map 3":"p_partkey (type: int)"}
    outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
    Statistics:Num rows: 29 Data size: 17951 Basic stats: COMPLETE Column stats: COMPLETE
    @@ -7463,6 +7469,7 @@ Stage-0
                              value expressions:_col5 (type: int), _col7 (type: double)
                              Map Join Operator [MAPJOIN_20]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Reducer 2":"_col0 (type: int)","Map 4":"p_partkey (type: int)"}
    outputColumnNames:["_col1","_col2","_col5","_col7"]
    Statistics:Num rows: 29 Data size: 6699 Basic stats: COMPLETE Column stats: COMPLETE
    @@ -8247,6 +8254,7 @@ Stage-0
                     Statistics:Num rows: 3 Data size: 99 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator [MAPJOIN_31]
    condition map:[{"":"Inner Join 0 to 1"},{"":"Inner Join 1 to 2"},{"":"Inner Join 2 to 3"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 1":"key (type: string)","Map 2":"key (type: string)","Map 3":"key (type: string)","Map 4":"key (type: string)"}
    outputColumnNames:["_col0","_col1","_col5","_col6","_col10","_col11","_col15","_col16"]
    Statistics:Num rows: 3 Data size: 99 Basic stats: COMPLETE Column stats: NONE
    @@ -8327,6 +8335,7 @@ Stage-0
                     Statistics:Num rows: 3 Data size: 99 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator [MAPJOIN_31]
    condition map:[{"":"Inner Join 0 to 1"},{"":"Inner Join 1 to 2"},{"":"Inner Join 2 to 3"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 1":"key (type: string)","Map 2":"key (type: string)","Map 3":"key (type: string)","Map 4":"key (type: string)"}
    outputColumnNames:["_col0","_col1","_col5","_col6","_col10","_col11","_col15","_col16"]
    Statistics:Num rows: 3 Data size: 99 Basic stats: COMPLETE Column stats: NONE
    @@ -8417,6 +8426,7 @@ Stage-0
                              value expressions:hash(_col0) (type: int), hash(_col1) (type: int), hash(_col5) (type: int)
                              Map Join Operator [MAPJOIN_18]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 1":"UDFToDouble(key) (type: double)","Map 2":"(key + 1) (type: double)"}
    outputColumnNames:["_col0","_col1","_col5"]
    Statistics:Num rows: 275 Data size: 23925 Basic stats: COMPLETE Column stats: NONE
    @@ -8558,6 +8568,7 @@ Stage-0
                              value expressions:hash(_col0) (type: int), hash(_col6) (type: int)
                              Map Join Operator [MAPJOIN_18]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 1":"key (type: string)","Map 4":"val (type: string)"}
    outputColumnNames:["_col0","_col6"]
    Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
    @@ -8621,6 +8632,7 @@ Stage-0
                              value expressions:hash(_col0) (type: int), hash(_col6) (type: int)
                              Map Join Operator [MAPJOIN_18]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 1":"key (type: string)","Map 4":"key (type: string)"}
    outputColumnNames:["_col0","_col6"]
    Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
    @@ -8684,6 +8696,7 @@ Stage-0
                              Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
                              Map Join Operator [MAPJOIN_18]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 1":"key (type: string)","Map 4":"key (type: string)"}
    Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
    <-Map 4 [BROADCAST_EDGE]
    @@ -8746,6 +8759,7 @@ Stage-0
                              value expressions:hash(_col0) (type: int), hash(_col1) (type: int), hash(_col5) (type: int)
                              Map Join Operator [MAPJOIN_14]
    condition map:[{"":"Left Outer Join0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 1":"UDFToDouble(key) (type: double)","Map 4":"(key + 1) (type: double)"}
    outputColumnNames:["_col0","_col1","_col5"]
    Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
    @@ -8804,6 +8818,7 @@ Stage-0
                              value expressions:hash(_col0) (type: int), hash(_col1) (type: int), hash(_col5) (type: int)
                              Map Join Operator [MAPJOIN_14]
    condition map:[{"":"Right Outer Join0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 1":"UDFToDouble(key) (type: double)","Map 2":"(key + 1) (type: double)"}
    outputColumnNames:["_col0","_col1","_col5"]
    Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
    @@ -8927,6 +8942,7 @@ Stage-0
                              value expressions:hash(_col0) (type: int), hash(_col6) (type: int)
                              Map Join Operator [MAPJOIN_14]
    condition map:[{"":"Left Outer Join0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 1":"(key + 1) (type: double)","Map 4":"UDFToDouble(key) (type: double)"}
    outputColumnNames:["_col0","_col6"]
    Statistics:Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
    http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
    index 7b361ac..ff055ea 100644
    --- a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
    +++ b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
    @@ -1405,6 +1405,7 @@ Stage-0
                     Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator [MAPJOIN_28]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 1":"_col3 (type: string)","Map 3":"_col0 (type: string)"}
    outputColumnNames:["_col0","_col3","_col6"]
    Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
    @@ -1426,6 +1427,7 @@ Stage-0
    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    <-Map Join Operator [MAPJOIN_27]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 1":"_col0 (type: string)","Map 2":"_col1 (type: string)"}
    outputColumnNames:["_col0","_col3"]
    Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    @@ -1566,6 +1568,7 @@ Stage-0
                                          Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                                          Map Join Operator [MAPJOIN_110]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 2":"_col1 (type: string), _col3 (type: string)","Map 3":"_col15 (type: string), _col17 (type: string)"}
    outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"]
    Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
    @@ -1578,6 +1581,7 @@ Stage-0
    value expressions:_col2 (type: string)
    Map Join Operator [MAPJOIN_104]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 1":"_col0 (type: string)","Map 2":"_col0 (type: string)"}
    outputColumnNames:["_col1","_col2","_col3"]
    Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
    @@ -1611,6 +1615,7 @@ Stage-0
                                                Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
                                                Map Join Operator [MAPJOIN_109]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 3":"_col4 (type: string), _col6 (type: string)","Map 10":"_col2 (type: string), _col4 (type: string)"}
    outputColumnNames:["_col2","_col3","_col14","_col15","_col17"]
    Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
    @@ -1623,6 +1628,7 @@ Stage-0
    value expressions:_col3 (type: string), _col5 (type: string)
    Map Join Operator [MAPJOIN_108]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 9":"_col0 (type: string)","Map 10":"_col0 (type: string)"}
    outputColumnNames:["_col2","_col3","_col4","_col5"]
    Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
    @@ -1653,6 +1659,7 @@ Stage-0
    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    <-Map Join Operator [MAPJOIN_107]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 3":"_col3 (type: string)","Map 8":"_col1 (type: string)"}
    outputColumnNames:["_col2","_col3","_col4","_col6"]
    Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
    @@ -1673,6 +1680,7 @@ Stage-0
    Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    <-Map Join Operator [MAPJOIN_106]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 3":"_col2 (type: string)","Map 7":"_col0 (type: string)"}
    outputColumnNames:["_col2","_col3","_col4","_col6"]
    Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
    @@ -1693,6 +1701,7 @@ Stage-0
    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    <-Map Join Operator [MAPJOIN_105]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 3":"_col1 (type: string)","Map 6":"_col3 (type: string)"}
    outputColumnNames:["_col2","_col3","_col4","_col6"]
    Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
    @@ -1780,6 +1789,7 @@ Stage-0
    Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_85]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Reducer 11":"_col2 (type: string)","Map 14":"_col0 (type: string)"}
    outputColumnNames:["_col1","_col2"]
    Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    @@ -1800,6 +1810,7 @@ Stage-0
    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    <-Map Join Operator [MAPJOIN_84]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Reducer 11":"_col1 (type: string)","Map 13":"_col1 (type: string)"}
    outputColumnNames:["_col1","_col2"]
    Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
    @@ -1880,6 +1891,7 @@ Stage-0
                                 Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                                 Map Join Operator [MAPJOIN_83]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Reducer 3":"_col2 (type: string)","Map 8":"_col0 (type: string)"}
    outputColumnNames:["_col1","_col2"]
    Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    @@ -1900,6 +1912,7 @@ Stage-0
    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    <-Map Join Operator [MAPJOIN_82]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Reducer 3":"_col1 (type: string)","Map 7":"_col1 (type: string)"}
    outputColumnNames:["_col1","_col2"]
    Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
    @@ -2042,6 +2055,7 @@ Stage-0
    Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_167]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Reducer 26":"_col2 (type: string)","Map 31":"_col0 (type: string)"}
    outputColumnNames:["_col2","_col5"]
    Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    @@ -2063,6 +2077,7 @@ Stage-0
    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    <-Map Join Operator [MAPJOIN_166]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Reducer 26":"_col1 (type: string)","Map 30":"_col1 (type: string)"}
    outputColumnNames:["_col2"]
    Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
    @@ -2226,6 +2241,7 @@ Stage-0
    Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_165]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Reducer 15":"_col2 (type: string)","Map 19":"_col0 (type: string)"}
    outputColumnNames:["_col2","_col5"]
    Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    @@ -2247,6 +2263,7 @@ Stage-0
    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    <-Map Join Operator [MAPJOIN_164]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Reducer 15":"_col1 (type: string)","Map 18":"_col1 (type: string)"}
    outputColumnNames:["_col2"]
    Statistics:Num rows: 209 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
    @@ -2361,6 +2378,7 @@ Stage-0
                                             Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                                             Map Join Operator [MAPJOIN_163]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Reducer 3":"_col2 (type: string)","Map 10":"_col0 (type: string)"}
    outputColumnNames:["_col2","_col5"]
    Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    @@ -2382,6 +2400,7 @@ Stage-0
    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    <-Map Join Operator [MAPJOIN_162]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Reducer 3":"_col1 (type: string)","Map 9":"_col1 (type: string)"}
    outputColumnNames:["_col2"]
    Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
    @@ -3058,6 +3077,7 @@ Stage-0
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_120]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 17":"_col1 (type: string)","Map 18":"_col0 (type: string)"}
    outputColumnNames:["_col0","_col3"]
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    @@ -3070,6 +3090,7 @@ Stage-0
    value expressions:_col0 (type: string), _col3 (type: string)
    Map Join Operator [MAPJOIN_119]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | | HybridGraceHashJoin:true
    keys:{"Map 16":"_col0 (type: string)","Map 17":"_col0 (type: string)"}
    outputColumnNames:["_col0","_col1","_col3"]
    Statistics:Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
    @@ -3138,6 +3159,7 @@ Stage-0
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_120]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 17":"_col1 (type: string)","Map 19":"_col0 (type: string)"}
    outputColumnNames:["_col0","_col3"]
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    @@ -3161,6 +3183,7 @@ Stage-0
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_120]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 17":"_col1 (type: string)","Map 20":"_col0 (type: string)"}
    outputColumnNames:["_col0","_col3"]
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    @@ -3184,6 +3207,7 @@ Stage-0
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_120]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 17":"_col1 (type: string)","Map 21":"_col0 (type: string)"}
    outputColumnNames:["_col0","_col3"]
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    @@ -3337,6 +3361,7 @@ Stage-0
    Statistics:Num rows: 289 Data size: 3030 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_115]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 1":"_col0 (type: string)","Map 6":"_col1 (type: string)"}
    outputColumnNames:["_col1"]
    Statistics:Num rows: 289 Data size: 3030 Basic stats: COMPLETE Column stats: NONE
    @@ -3380,6 +3405,7 @@ Stage-0
                                 Statistics:Num rows: 289 Data size: 3030 Basic stats: COMPLETE Column stats: NONE
                                 Map Join Operator [MAPJOIN_115]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Map 5":"_col0 (type: string)","Map 6":"_col1 (type: string)"}
    outputColumnNames:["_col1"]
    Statistics:Num rows: 289 Data size: 3030 Basic stats: COMPLETE Column stats: NONE
    @@ -3474,6 +3500,7 @@ Stage-0
    Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_164]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 24":"_col1 (type: string)","Reducer 31":"_col1 (type: string)"}
    outputColumnNames:["_col0","_col3"]
    Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
    @@ -3486,6 +3513,7 @@ Stage-0
    value expressions:_col0 (type: string), _col3 (type: string)
    Map Join Operator [MAPJOIN_163]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | | HybridGraceHashJoin:true
    keys:{"Map 23":"_col0 (type: string)","Map 24":"_col0 (type: string)"}
    outputColumnNames:["_col0","_col1","_col3"]
    Statistics:Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
    @@ -3832,6 +3860,7 @@ Stage-0
                                                   Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
                                                   Map Join Operator [MAPJOIN_159]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    keys:{"Reducer 3":"_col1 (type: string)","Map 10":"_col1 (type: string)"}
    outputColumnNames:["_col2"]
    Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
    @@ -3997,6 +4026,7 @@ Stage-5
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_108]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 21":"_col1 (type: string)","Map 16":"_col1 (type: string)"}
    outputColumnNames:["_col0","_col6"]
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    @@ -4009,6 +4039,7 @@ Stage-5
    value expressions:_col0 (type: string), _col6 (type: string)
    Map Join Operator [MAPJOIN_105]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | | HybridGraceHashJoin:true
    keys:{"Map 20":"key (type: string)","Map 21":"key (type: string)"}
    outputColumnNames:["_col0","_col1","_col6"]
    Statistics:Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
    @@ -4081,6 +4112,7 @@ Stage-5
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_108]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 21":"_col1 (type: string)","Map 17":"_col1 (type: string)"}
    outputColumnNames:["_col0","_col6"]
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    @@ -4114,6 +4146,7 @@ Stage-5
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_108]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 21":"_col1 (type: string)","Map 18":"_col1 (type: string)"}
    outputColumnNames:["_col0","_col6"]
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    @@ -4147,6 +4180,7 @@ Stage-5
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_108]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 21":"_col1 (type: string)","Map 19":"_col1 (type: string)"}
    outputColumnNames:["_col0","_col6"]
    Statistics:Num rows: 839 Data size: 8873 Basic stats: COMPLETE Column stats: NONE
    @@ -4299,6 +4333,7 @@ Stage-5
    value expressions:_col0 (type: string), _col6 (type: string)
    Map Join Operator [MAPJOIN_103]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 6":"key (type: string)","Map 7":"key (type: string)"}
    outputColumnNames:["_col0","_col1","_col6"]
    Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    @@ -4511,6 +4546,7 @@ Stage-5
    Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
    Map Join Operator [MAPJOIN_160]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 34":"_col1 (type: string)","Reducer 29":"_col1 (type: string)"}
    outputColumnNames:["_col0","_col6"]
    Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
    @@ -4523,6 +4559,7 @@ Stage-5
    value expressions:_col0 (type: string), _col6 (type: string)
    Map Join Operator [MAPJOIN_157]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | | HybridGraceHashJoin:true
    keys:{"Map 33":"key (type: string)","Map 34":"key (type: string)"}
    outputColumnNames:["_col0","_col1","_col6"]
    Statistics:Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
    @@ -4843,6 +4880,7 @@ Stage-5
    value expressions:_col0 (type: string), _col6 (type: string)
    Map Join Operator [MAPJOIN_155]
    condition map:[{"":"Inner Join 0 to 1"}]
    + | | HybridGraceHashJoin:true
    keys:{"Map 10":"key (type: string)","Map 11":"key (type: string)"}
    outputColumnNames:["_col0","_col1","_col6"]
    Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
    index 4f69b3b..880d2ad 100644
    --- a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
    +++ b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
    @@ -1,3 +1,55 @@
    +PREHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@acid_vectorized
    +POSTHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@acid_vectorized
    +PREHOOK: query: insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@alltypesorc
    +PREHOOK: Output: default@acid_vectorized
    +POSTHOOK: query: insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@alltypesorc
    +POSTHOOK: Output: default@acid_vectorized
    +POSTHOOK: Lineage: acid_vectorized.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
    +POSTHOOK: Lineage: acid_vectorized.b SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
    +PREHOOK: query: explain select a, b from acid_vectorized order by a, b
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain select a, b from acid_vectorized order by a, b
    +POSTHOOK: type: QUERY
    +Plan optimized by CBO.
    +
    +Vertex dependency in root stage
    +Reducer 2 <- Map 1 (SIMPLE_EDGE)
    +
    +Stage-0
    + Fetch Operator
    + limit:-1
    + Stage-1
    + Reducer 2 vectorized
    + File Output Operator [FS_8]
    + compressed:false
    + Statistics:Num rows: 10 Data size: 1704 Basic stats: COMPLETE Column stats: NONE
    + table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
    + Select Operator [OP_7]
    + | outputColumnNames:["_col0","_col1"]
    + | Statistics:Num rows: 10 Data size: 1704 Basic stats: COMPLETE Column stats: NONE
    + |<-Map 1 [SIMPLE_EDGE] vectorized
    + Reduce Output Operator [RS_6]
    + key expressions:_col0 (type: int), _col1 (type: string)
    + sort order:++
    + Statistics:Num rows: 10 Data size: 1704 Basic stats: COMPLETE Column stats: NONE
    + Select Operator [OP_5]
    + outputColumnNames:["_col0","_col1"]
    + Statistics:Num rows: 10 Data size: 1704 Basic stats: COMPLETE Column stats: NONE
    + TableScan [TS_0]
    + ACID table:true
    + alias:acid_vectorized
    + Statistics:Num rows: 10 Data size: 1704 Basic stats: COMPLETE Column stats: NONE
    +
      PREHOOK: query: explain select key, value
      FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol
      PREHOOK: type: QUERY
    @@ -448,15 +500,15 @@ Stage-0
         Fetch Operator
            limit:5
            Stage-1
    - Reducer 2
    - File Output Operator [FS_5]
    + Reducer 2 vectorized
    + File Output Operator [FS_8]
                  compressed:false
                  Statistics:Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
                  table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
    - Limit [LIM_4]
    + Limit [LIM_7]
                     Number of rows:5
                     Statistics:Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
    - Select Operator [SEL_3]
    + Select Operator [OP_6]
    outputColumnNames:["_col0","_col1"]
    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    <-Map 1 [SIMPLE_EDGE]
    @@ -506,15 +558,15 @@ Stage-3
                              Stage-8(CONDITIONAL CHILD TASKS: Stage-5, Stage-4, Stage-6)
                                 Conditional Operator
                                    Stage-1
    - Map 1
    - File Output Operator [FS_3]
    + Map 1 vectorized
    + File Output Operator [FS_10]
                                          compressed:false
                                          Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
                                          table:{"input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat","serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.orc_merge5"}
    - Select Operator [SEL_2]
    + Select Operator [OP_9]
                                             outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
                                             Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
    - Filter Operator [FIL_4]
    + Filter Operator [FIL_8]
                                                predicate:(userid <= 13) (type: boolean)
                                                Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
                                                TableScan [TS_0]
    @@ -539,3 +591,165 @@ POSTHOOK: query: drop table orc_merge5
      POSTHOOK: type: DROPTABLE
      POSTHOOK: Input: default@orc_merge5
      POSTHOOK: Output: default@orc_merge5
    +PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@srcbucket_mapjoin
    +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@srcbucket_mapjoin
    +PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@tab_part
    +POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@tab_part
    +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@srcbucket_mapjoin_part
    +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@srcbucket_mapjoin_part
    +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
    +PREHOOK: type: LOAD
    +#### A masked pattern was here ####
    +PREHOOK: Output: default@srcbucket_mapjoin
    +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
    +POSTHOOK: type: LOAD
    +#### A masked pattern was here ####
    +POSTHOOK: Output: default@srcbucket_mapjoin
    +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
    +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
    +PREHOOK: type: LOAD
    +#### A masked pattern was here ####
    +PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
    +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
    +POSTHOOK: type: LOAD
    +#### A masked pattern was here ####
    +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
    +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
    +PREHOOK: type: LOAD
    +#### A masked pattern was here ####
    +PREHOOK: Output: default@srcbucket_mapjoin_part
    +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
    +POSTHOOK: type: LOAD
    +#### A masked pattern was here ####
    +POSTHOOK: Output: default@srcbucket_mapjoin_part
    +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
    +PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
    +PREHOOK: type: LOAD
    +#### A masked pattern was here ####
    +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
    +POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
    +POSTHOOK: type: LOAD
    +#### A masked pattern was here ####
    +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
    +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
    +PREHOOK: type: LOAD
    +#### A masked pattern was here ####
    +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
    +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
    +POSTHOOK: type: LOAD
    +#### A masked pattern was here ####
    +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
    +PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
    +PREHOOK: type: LOAD
    +#### A masked pattern was here ####
    +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
    +POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
    +POSTHOOK: type: LOAD
    +#### A masked pattern was here ####
    +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
    +PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
    +select key,value from srcbucket_mapjoin_part
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@srcbucket_mapjoin_part
    +PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
    +PREHOOK: Output: default@tab_part@ds=2008-04-08
    +POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
    +select key,value from srcbucket_mapjoin_part
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@srcbucket_mapjoin_part
    +POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
    +POSTHOOK: Output: default@tab_part@ds=2008-04-08
    +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ]
    +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ]
    +PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@tab
    +POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@tab
    +PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
    +select key,value from srcbucket_mapjoin
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@srcbucket_mapjoin
    +PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
    +PREHOOK: Output: default@tab@ds=2008-04-08
    +POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
    +select key,value from srcbucket_mapjoin
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@srcbucket_mapjoin
    +POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
    +POSTHOOK: Output: default@tab@ds=2008-04-08
    +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
    +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
    +PREHOOK: query: explain
    +select a.key, a.value, b.value
    +from tab a join tab_part b on a.key = b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +select a.key, a.value, b.value
    +from tab a join tab_part b on a.key = b.key
    +POSTHOOK: type: QUERY
    +Plan not optimized by CBO due to missing statistics. Please check log for more details.
    +
    +Vertex dependency in root stage
    +Map 2 <- Map 1 (CUSTOM_EDGE)
    +
    +Stage-0
    + Fetch Operator
    + limit:-1
    + Stage-1
    + Map 2
    + File Output Operator [FS_8]
    + compressed:false
    + Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
    + Select Operator [SEL_7]
    + outputColumnNames:["_col0","_col1","_col2"]
    + Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + Map Join Operator [MAPJOIN_13]
    + | BucketMapJoin:true
    + | condition map:[{"":"Inner Join 0 to 1"}]
    + | HybridGraceHashJoin:true
    + | keys:{"Map 1":"key (type: int)","Map 2":"key (type: int)"}
    + | outputColumnNames:["_col0","_col1","_col7"]
    + | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + |<-Map 1 [CUSTOM_EDGE]
    + | Reduce Output Operator [RS_3]
    + | key expressions:key (type: int)
    + | Map-reduce partition columns:key (type: int)
    + | sort order:+
    + | Statistics:Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
    + | value expressions:value (type: string)
    + | Filter Operator [FIL_11]
    + | predicate:key is not null (type: boolean)
    + | Statistics:Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
    + | TableScan [TS_0]
    + | alias:a
    + | Statistics:Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
    + |<-Filter Operator [FIL_12]
    + predicate:key is not null (type: boolean)
    + Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + TableScan [TS_1]
    + alias:b
    + Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    +

    http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/tez/vector_aggregate_without_gby.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vector_aggregate_without_gby.q.out b/ql/src/test/results/clientpositive/tez/vector_aggregate_without_gby.q.out
    index 9718871..1d84e3b 100644
    --- a/ql/src/test/results/clientpositive/tez/vector_aggregate_without_gby.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vector_aggregate_without_gby.q.out
    @@ -46,7 +46,7 @@ Stage-0
         Fetch Operator
            limit:-1
            Stage-1
    - Reducer 2
    + Reducer 2 vectorized
               File Output Operator [FS_7]
                  compressed:false
                  Statistics:Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: NONE
    @@ -55,7 +55,7 @@ Stage-0
    aggregations:["max(VALUE._col0)","max(VALUE._col1)"]
    outputColumnNames:["_col0","_col1"]
    Statistics:Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: NONE
    - |<-Map 1 [SIMPLE_EDGE]
    + |<-Map 1 [SIMPLE_EDGE] vectorized
                     Reduce Output Operator [RS_4]
                        sort order:
                        Statistics:Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: NONE

    http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/tez/vector_auto_smb_mapjoin_14.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vector_auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/tez/vector_auto_smb_mapjoin_14.q.out
    index 480c4e1..cb6de24 100644
    --- a/ql/src/test/results/clientpositive/tez/vector_auto_smb_mapjoin_14.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vector_auto_smb_mapjoin_14.q.out
    @@ -63,7 +63,7 @@ Stage-0
         Fetch Operator
            limit:-1
            Stage-1
    - Reducer 2
    + Reducer 2 vectorized
               File Output Operator [FS_13]
                  compressed:false
                  Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
    @@ -146,7 +146,7 @@ Stage-0
         Fetch Operator
            limit:-1
            Stage-1
    - Reducer 3
    + Reducer 3 vectorized
               File Output Operator [FS_18]
                  compressed:false
                  Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
    @@ -155,7 +155,7 @@ Stage-0
    aggregations:["count(VALUE._col0)"]
    outputColumnNames:["_col0"]
    Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
    - |<-Reducer 2 [SIMPLE_EDGE]
    + |<-Reducer 2 [SIMPLE_EDGE] vectorized
                     Reduce Output Operator [RS_15]
                        sort order:
                        Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
    @@ -289,7 +289,7 @@ Stage-0
    keys:{"0":"_col0 (type: int)","1":"_col0 (type: int)"}
    outputColumnNames:["_col0","_col1","_col3"]
    Statistics:Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: NONE
    - |<-Reducer 2 [SIMPLE_EDGE]
    + |<-Reducer 2 [SIMPLE_EDGE] vectorized
    Reduce Output Operator [RS_51]
    key expressions:_col0 (type: int)
    Map-reduce partition columns:_col0 (type: int)
    @@ -331,7 +331,7 @@ Stage-0
    TableScan [TS_0]
    alias:a
    Statistics:Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
    - |<-Reducer 6 [SIMPLE_EDGE]
    + |<-Reducer 6 [SIMPLE_EDGE] vectorized
                        Reduce Output Operator [RS_53]
                           key expressions:_col0 (type: int)
                           Map-reduce partition columns:_col0 (type: int)
    @@ -445,7 +445,7 @@ Stage-0
         Fetch Operator
            limit:-1
            Stage-1
    - Reducer 2
    + Reducer 2 vectorized
               File Output Operator [FS_16]
                  compressed:false
                  Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
    @@ -543,7 +543,7 @@ Stage-0
         Fetch Operator
            limit:-1
            Stage-1
    - Reducer 2
    + Reducer 2 vectorized
               File Output Operator [FS_16]
                  compressed:false
                  Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
    @@ -662,7 +662,7 @@ Stage-0
         Fetch Operator
            limit:-1
            Stage-1
    - Reducer 2
    + Reducer 2 vectorized
               File Output Operator [FS_20]
                  compressed:false
                  Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
    @@ -774,7 +774,7 @@ Stage-0
         Fetch Operator
            limit:-1
            Stage-1
    - Reducer 2
    + Reducer 2 vectorized
               File Output Operator [FS_16]
                  compressed:false
                  Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
    @@ -863,7 +863,7 @@ Stage-0
         Fetch Operator
            limit:-1
            Stage-1
    - Reducer 3
    + Reducer 3 vectorized
               File Output Operator [FS_14]
                  compressed:false
                  Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
    @@ -885,7 +885,7 @@ Stage-0
    condition map:[{"":"Inner Join 0 to 1"}]
    keys:{"0":"_col0 (type: int)","1":"_col0 (type: int)"}
    Statistics:Num rows: 5 Data size: 511 Basic stats: COMPLETE Column stats: NONE
    - |<-Map 1 [SIMPLE_EDGE]
    + |<-Map 1 [SIMPLE_EDGE] vectorized
    Reduce Output Operator [RS_22]
    key expressions:_col0 (type: int)
    Map-reduce partition columns:_col0 (type: int)
    @@ -900,7 +900,7 @@ Stage-0
    TableScan [TS_0]
    alias:a
    Statistics:Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
    - |<-Map 4 [SIMPLE_EDGE]
    + |<-Map 4 [SIMPLE_EDGE] vectorized
                              Reduce Output Operator [RS_25]
                                 key expressions:_col0 (type: int)
                                 Map-reduce partition columns:_col0 (type: int)
    @@ -958,7 +958,7 @@ Stage-0
         Fetch Operator
            limit:-1
            Stage-1
    - Reducer 2
    + Reducer 2 vectorized
               File Output Operator [FS_14]
                  compressed:false
                  Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
    @@ -1045,7 +1045,7 @@ Stage-0
         Fetch Operator
            limit:-1
            Stage-1
    - Reducer 2
    + Reducer 2 vectorized
               File Output Operator [FS_21]
                  compressed:false
                  Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
    @@ -1161,7 +1161,7 @@ Stage-0
         Fetch Operator
            limit:-1
            Stage-1
    - Reducer 2
    + Reducer 2 vectorized
               File Output Operator [FS_17]
                  compressed:false
                  Statistics:Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
    @@ -1448,7 +1448,7 @@ Stage-4
                  Stage-3
                     Dependency Collection{}
                        Stage-2
    - Reducer 2
    + Reducer 2 vectorized
                           File Output Operator [FS_25]
                              compressed:false
                              Statistics:Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE

    http://git-wip-us.apache.org/repos/asf/hive/blob/6577f55c/ql/src/test/results/clientpositive/tez/vectorized_parquet_types.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vectorized_parquet_types.q.out b/ql/src/test/results/clientpositive/tez/vectorized_parquet_types.q.out
    index a7ff528..0cb2270 100644
    --- a/ql/src/test/results/clientpositive/tez/vectorized_parquet_types.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vectorized_parquet_types.q.out
    @@ -249,7 +249,7 @@ Stage-0
         Fetch Operator
            limit:-1
            Stage-1
    - Reducer 3
    + Reducer 3 vectorized
               File Output Operator [FS_10]
                  compressed:false
                  Statistics:Num rows: 11 Data size: 121 Basic stats: COMPLETE Column stats: NONE
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-12171 : LLAP: BuddyAllocator failures when querying uncompressed data (Sergey Shelukhin, reviewed by Gopal V)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cdbd1c85
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cdbd1c85
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cdbd1c85

    Branch: refs/heads/master-fixed
    Commit: cdbd1c8517e70614ec9dfd0bfdc978b200a946c2
    Parents: a46005c
    Author: Sergey Shelukhin <sershe@apache.org>
    Authored: Mon Nov 2 13:16:34 2015 -0800
    Committer: Sergey Shelukhin <sershe@apache.org>
    Committed: Mon Nov 2 13:16:34 2015 -0800

    ----------------------------------------------------------------------
      .../org/apache/hadoop/hive/conf/HiveConf.java | 7 +-
      .../hadoop/hive/llap/cache/BuddyAllocator.java | 89 +++++++++------
      .../llap/cache/LowLevelCacheMemoryManager.java | 12 ++
      .../hadoop/hive/llap/cache/MemoryManager.java | 1 +
      .../hive/llap/cache/TestBuddyAllocator.java | 6 +-
      .../hive/llap/cache/TestOrcMetadataCache.java | 4 +
      .../ql/io/orc/encoded/EncodedReaderImpl.java | 109 ++++++++++---------
      7 files changed, 144 insertions(+), 84 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    ----------------------------------------------------------------------
    diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    index 5198bb5..3ab73ad 100644
    --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    @@ -2308,9 +2308,10 @@ public class HiveConf extends Configuration {
          LLAP_ORC_CACHE_MAX_ALLOC("hive.llap.io.cache.orc.alloc.max", 16 * 1024 * 1024,
              "Maximum allocation possible from LLAP low-level cache for ORC. Should be as large as\n" +
              "the largest expected ORC compression buffer size. Must be power of 2."),
    - LLAP_ORC_CACHE_ARENA_SIZE("hive.llap.io.cache.orc.arena.size", 128 * 1024 * 1024,
    - "Arena size for ORC low-level cache; cache will be allocated in arena-sized steps.\n" +
    - "Must presently be a power of two."),
    + LLAP_ORC_CACHE_ARENA_COUNT("hive.llap.io.cache.orc.arena.count", 8,
    + "Arena count for LLAP low-level cache; cache will be allocated in the steps of\n" +
    + "(size/arena_count) bytes. This size must be <= 1Gb and >= max allocation; if it is\n" +
    + "not the case, an adjusted size will be used. Using powers of 2 is recommended."),
          LLAP_ORC_CACHE_MAX_SIZE("hive.llap.io.cache.orc.size", 1024L * 1024 * 1024,
              "Maximum size for ORC low-level cache; must be a multiple of arena size."),
          LLAP_ORC_CACHE_ALLOCATE_DIRECT("hive.llap.io.cache.direct", true,

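    A side note on the new arena-count setting: per the LLAP_ORC_CACHE_ARENA_COUNT description above and the
    BuddyAllocator change below, the allocator now derives the arena size as (cache size / arena count), clamps
    it between the maximum allocation and a 1 GB ceiling, and rounds it down to a multiple of the maximum
    allocation. A minimal standalone sketch of that arithmetic follows; the class and method names and the main()
    driver are illustrative only (not part of the patch), while the constants mirror the HiveConf defaults shown
    in the hunk above.

        // Sketch of the arena-size derivation implied by LLAP_ORC_CACHE_ARENA_COUNT.
        // Not the patch itself; names are made up for illustration.
        public class ArenaSizeSketch {
          // 1 GB ceiling, mirroring MAX_ARENA_SIZE in the BuddyAllocator change below.
          private static final int MAX_ARENA_SIZE = 1024 * 1024 * 1024;

          static int deriveArenaSize(long maxSize, int maxAllocation, int arenaCount) {
            // size / arena_count, falling back to the ceiling when the count is 0
            int arenaSize = (arenaCount == 0) ? MAX_ARENA_SIZE : (int) (maxSize / arenaCount);
            // clamp into [maxAllocation, MAX_ARENA_SIZE]
            arenaSize = Math.max(maxAllocation, Math.min(arenaSize, MAX_ARENA_SIZE));
            // round down so the arena stays divisible by the maximum allocation
            if (arenaSize % maxAllocation > 0) {
              arenaSize = (arenaSize / maxAllocation) * maxAllocation;
            }
            return arenaSize;
          }

          public static void main(String[] args) {
            // Defaults from the HiveConf hunk above: 1 GB cache, 16 MB max allocation, 8 arenas.
            long maxSize = 1024L * 1024 * 1024;
            int maxAllocation = 16 * 1024 * 1024;
            System.out.println(deriveArenaSize(maxSize, maxAllocation, 8)); // 134217728 (128 MB)
          }
        }

    With the defaults this reproduces the previous 128 MB arena size, which is presumably why the old
    hive.llap.io.cache.orc.arena.size setting could be replaced by a count without changing default behavior.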
    http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
    index 2aca68d..485a145 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
    @@ -40,33 +40,43 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
        private final long maxSize;
        private final boolean isDirect;
        private final LlapDaemonCacheMetrics metrics;
    -
    +
    + // We don't know the acceptable size for Java array, so we'll use 1Gb boundary.
    + // That is guaranteed to fit any maximum allocation.
    + private static final int MAX_ARENA_SIZE = 1024*1024*1024;
        public BuddyAllocator(Configuration conf, MemoryManager memoryManager,
            LlapDaemonCacheMetrics metrics) {
          isDirect = HiveConf.getBoolVar(conf, ConfVars.LLAP_ORC_CACHE_ALLOCATE_DIRECT);
          minAllocation = HiveConf.getIntVar(conf, ConfVars.LLAP_ORC_CACHE_MIN_ALLOC);
          maxAllocation = HiveConf.getIntVar(conf, ConfVars.LLAP_ORC_CACHE_MAX_ALLOC);
    - arenaSize = HiveConf.getIntVar(conf, ConfVars.LLAP_ORC_CACHE_ARENA_SIZE);
    + int arenaCount = HiveConf.getIntVar(conf, ConfVars.LLAP_ORC_CACHE_ARENA_COUNT);
          long maxSizeVal = HiveConf.getLongVar(conf, ConfVars.LLAP_ORC_CACHE_MAX_SIZE);
    - if (LlapIoImpl.LOGL.isInfoEnabled()) {
    + int arenaSizeVal = (arenaCount == 0) ? MAX_ARENA_SIZE : (int)(maxSizeVal / arenaCount);
    + arenaSizeVal = Math.max(maxAllocation, Math.min(arenaSizeVal, MAX_ARENA_SIZE));
    + if (LlapIoImpl.LOG.isInfoEnabled()) {
            LlapIoImpl.LOG.info("Buddy allocator with " + (isDirect ? "direct" : "byte")
                + " buffers; allocation sizes " + minAllocation + " - " + maxAllocation
    - + ", arena size " + arenaSize + ". total size " + maxSizeVal);
    + + ", arena size " + arenaSizeVal + ". total size " + maxSizeVal);
          }

          if (minAllocation < 8) {
            throw new AssertionError("Min allocation must be at least 8: " + minAllocation);
          }
    - if (maxSizeVal < arenaSize || arenaSize < maxAllocation || maxAllocation < minAllocation) {
    + if (maxSizeVal < arenaSizeVal || maxAllocation < minAllocation) {
            throw new AssertionError("Inconsistent sizes of cache, arena and allocations: "
    - + minAllocation + ", " + maxAllocation + ", " + arenaSize + ", " + maxSizeVal);
    + + minAllocation + ", " + maxAllocation + ", " + arenaSizeVal + ", " + maxSizeVal);
    + }
    + if ((Integer.bitCount(minAllocation) != 1) || (Integer.bitCount(maxAllocation) != 1)) {
    + throw new AssertionError("Allocation sizes must be powers of two: "
    + + minAllocation + ", " + maxAllocation);
          }
    - if ((Integer.bitCount(minAllocation) != 1) || (Integer.bitCount(maxAllocation) != 1)
    - || (Long.bitCount(arenaSize) != 1)) {
    - // Technically, arena size only needs to be divisible by maxAlloc
    - throw new AssertionError("Allocation and arena sizes must be powers of two: "
    - + minAllocation + ", " + maxAllocation + ", " + arenaSize);
    + if ((arenaSizeVal % maxAllocation) > 0) {
    + long oldArenaSize = arenaSizeVal;
    + arenaSizeVal = (arenaSizeVal / maxAllocation) * maxAllocation;
    + LlapIoImpl.LOG.warn("Rounding arena size to " + arenaSizeVal + " from " + oldArenaSize
    + + " to be divisible by allocation size " + maxAllocation);
          }
    + arenaSize = arenaSizeVal;
          if ((maxSizeVal % arenaSize) > 0) {
            long oldMaxSize = maxSizeVal;
            maxSizeVal = (maxSizeVal / arenaSize) * arenaSize;
    @@ -111,7 +121,7 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
          // TODO: reserving the entire thing is not ideal before we alloc anything. Interleave?
          memoryManager.reserveMemory(dest.length << allocLog2, true);

    - int ix = 0;
    + int destAllocIx = 0;
          for (int i = 0; i < dest.length; ++i) {
            if (dest[i] != null) continue;
            dest[i] = createUnallocated(); // TODO: pool of objects?
    @@ -123,22 +133,29 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
          }
          long threadId = arenaCount > 1 ? Thread.currentThread().getId() : 0;
          {
    - int startIndex = (int)(threadId % arenaCount), index = startIndex;
    + int startArenaIx = (int)(threadId % arenaCount), index = startArenaIx;
            do {
    - int newIx = arenas[index].allocateFast(index, freeListIx, dest, ix, allocationSize);
    - if (newIx == dest.length) return;
    - if (newIx != -1) { // TODO: check if it can still happen; count should take care of this.
    - ix = newIx;
    - }
    - ix = newIx;
    + int newDestIx = arenas[index].allocateFast(
    + index, freeListIx, dest, destAllocIx, allocationSize);
    + if (newDestIx == dest.length) return;
    + assert newDestIx != -1;
    + destAllocIx = newDestIx;
              if ((++index) == arenaCount) {
                index = 0;
              }
    - } while (index != startIndex);
    + } while (index != startArenaIx);
          }

    - // TODO: this is very hacky.
    - // We called reserveMemory so we know that somewhere in there, there's memory waiting for us.
    + // 1) We can get fragmented on large blocks of uncompressed data. The memory might be
    + // in there, but it might be in separate small blocks. This is a complicated problem, and
    + // several solutions (in order of decreasing ugliness and increasing complexity) are: just
    + // ask to evict the exact-sized block (there may be no such block), evict from a particular
    + // arena (policy would know allocator internals somewhat), store buffer mapping and ask to
    + // evict from specific choice of blocks next to each other or next to already-evicted block,
    + // and finally do a compaction (requires a block mapping and complex sync). For now we'd just
    + // force-evict some memory and avoid both complexity and ugliness, since large blocks are rare.
    + // 2) Fragmentation aside (TODO: and this is a very hacky solution for that),
    + // we called reserveMemory so we know that there's memory waiting for us somewhere.
          // However, we have a class of rare race conditions related to the order of locking/checking of
          // different allocation areas. Simple case - say we have 2 arenas, 256Kb available in arena 2.
          // We look at arena 1; someone deallocs 256Kb from arena 1 and allocs the same from arena 2;
    @@ -155,22 +172,32 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
          // But for now we will just retry 5 times 0_o
          for (int attempt = 0; attempt < 5; ++attempt) {
            // Try to split bigger blocks. TODO: again, ideally we would tryLock at least once
    - for (int i = 0; i < arenaCount; ++i) {
    - int newIx = arenas[i].allocateWithSplit(i, freeListIx, dest, ix, allocationSize);
    - if (newIx == -1) break; // Shouldn't happen.
    - if (newIx == dest.length) return;
    - ix = newIx;
    + {
    + int startArenaIx = (int)((threadId + attempt) % arenaCount), arenaIx = startArenaIx;
    + do {
    + int newDestIx = arenas[arenaIx].allocateWithSplit(
    + arenaIx, freeListIx, dest, destAllocIx, allocationSize);
    + if (newDestIx == dest.length) return;
    + assert newDestIx != -1;
    + destAllocIx = newDestIx;
    + if ((++arenaIx) == arenaCount) {
    + arenaIx = 0;
    + }
    + } while (arenaIx != startArenaIx);
            }
    +
            if (attempt == 0) {
              // Try to allocate memory if we haven't allocated all the way to maxSize yet; very rare.
    - for (int i = arenaCount; i < arenas.length; ++i) {
    - ix = arenas[i].allocateWithExpand(i, freeListIx, dest, ix, allocationSize);
    - if (ix == dest.length) return;
    + for (int arenaIx = arenaCount; arenaIx < arenas.length; ++arenaIx) {
    + destAllocIx = arenas[arenaIx].allocateWithExpand(
    + arenaIx, freeListIx, dest, destAllocIx, allocationSize);
    + if (destAllocIx == dest.length) return;
              }
            }
    + memoryManager.forceReservedMemory(allocationSize * (dest.length - destAllocIx));
            LlapIoImpl.LOG.warn("Failed to allocate despite reserved memory; will retry " + attempt);
          }
    - String msg = "Failed to allocate " + size + "; at " + ix + " out of " + dest.length;
    + String msg = "Failed to allocate " + size + "; at " + destAllocIx + " out of " + dest.length;
          LlapIoImpl.LOG.error(msg + "\nALLOCATOR STATE:\n" + debugDump()
              + "\nPARENT STATE:\n" + memoryManager.debugDumpForOom());
          throw new AllocatorOutOfMemoryException(msg);
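
    The comment block above motivates the fallback: the reservation already succeeded, so if the arenas cannot satisfy the request (fragmentation or a lost race), the allocator force-evicts the remaining shortfall and retries. A self-contained sketch of that control flow follows; tryAllocate, the one-buffer-per-pass allocator and the no-op manager are hypothetical stand-ins, and only forceReservedMemory's contract comes from the patch.

    class ForceEvictRetrySketch {
      interface Arenas  { int tryAllocate(int size, int count); }   // returns how many buffers it filled
      interface Manager { void forceReservedMemory(int toEvict); }  // contract of the new method above

      static boolean allocate(Arenas arenas, Manager mm, int size, int needed) {
        for (int attempt = 0; attempt < 5; ++attempt) {
          needed -= arenas.tryAllocate(size, needed);
          if (needed == 0) return true;
          // The reserve already succeeded, so the bytes exist somewhere in the cache;
          // evict enough to cover the shortfall and go around again.
          mm.forceReservedMemory(size * needed);
        }
        return false;
      }

      public static void main(String[] args) {
        Arenas arenas = (size, count) -> Math.min(count, 1); // pretend one buffer frees up per pass
        Manager mm = toEvict -> { /* pretend eviction always succeeds */ };
        System.out.println(allocate(arenas, mm, 262144, 3)); // true after three passes
      }
    }
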

    http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
    index 4a256ee..d584ca8 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
    @@ -71,6 +71,8 @@ public class LowLevelCacheMemoryManager implements MemoryManager {
                try {
                  Thread.sleep(Math.min(1000, nextLog));
                } catch (InterruptedException e) {
    + Thread.currentThread().interrupt();
    + return false;
                }
              }
              continue;
    @@ -90,6 +92,16 @@ public class LowLevelCacheMemoryManager implements MemoryManager {
          return true;
        }

    +
    + @Override
    + public void forceReservedMemory(int memoryToEvict) {
    + while (memoryToEvict > 0) {
    + long evicted = evictor.evictSomeBlocks(memoryToEvict);
    + if (evicted == 0) return;
    + memoryToEvict -= evicted;
    + }
    + }
    +
        @Override
        public void releaseMemory(long memoryToRelease) {
          long oldV;
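
    The InterruptedException change above stops the wait loop from silently swallowing interrupts: it restores the thread's interrupt status and reports failure to the caller. A minimal generic sketch of that idiom follows; the waitUntilTrue name and the 100ms poll are hypothetical, not LLAP code.

    class InterruptSketch {
      static boolean waitUntilTrue(java.util.function.BooleanSupplier condition) {
        while (!condition.getAsBoolean()) {
          try {
            Thread.sleep(100);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt for callers up the stack
            return false;                       // the wait did not complete
          }
        }
        return true;
      }

      public static void main(String[] args) {
        System.out.println(waitUntilTrue(() -> true)); // condition already true, returns immediately
      }
    }
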

    http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java
    index e1b0cb4..6cc262e 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/MemoryManager.java
    @@ -22,4 +22,5 @@ public interface MemoryManager extends LlapOomDebugDump {
        boolean reserveMemory(long memoryToReserve, boolean waitForEviction);
        void releaseMemory(long memUsage);
        void updateMaxSize(long maxSize);
    + void forceReservedMemory(int memoryToEvict);
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
    index 6d21997..6375996 100644
    --- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
    +++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
    @@ -58,6 +58,10 @@ public class TestBuddyAllocator {
          @Override
          public void updateMaxSize(long maxSize) {
          }
    +
    + @Override
    + public void forceReservedMemory(int memoryToEvict) {
    + }
        }

        @Test
    @@ -280,7 +284,7 @@ public class TestBuddyAllocator {
          Configuration conf = new Configuration();
          conf.setInt(ConfVars.LLAP_ORC_CACHE_MIN_ALLOC.varname, min);
          conf.setInt(ConfVars.LLAP_ORC_CACHE_MAX_ALLOC.varname, max);
    - conf.setInt(ConfVars.LLAP_ORC_CACHE_ARENA_SIZE.varname, arena);
    + conf.setInt(ConfVars.LLAP_ORC_CACHE_ARENA_COUNT.varname, total/arena);
          conf.setLong(ConfVars.LLAP_ORC_CACHE_MAX_SIZE.varname, total);
          return conf;
        }

    http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
    index b886d77..901e58a 100644
    --- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
    +++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestOrcMetadataCache.java
    @@ -78,6 +78,10 @@ public class TestOrcMetadataCache {
          @Override
          public void updateMaxSize(long maxSize) {
          }
    +
    + @Override
    + public void forceReservedMemory(int memoryToEvict) {
    + }
        }

        @Test

    http://git-wip-us.apache.org/repos/asf/hive/blob/cdbd1c85/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
    index e0c0743..f789a4f 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
    @@ -36,6 +36,7 @@ import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamD
      import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
      import org.apache.hadoop.hive.ql.io.orc.CompressionCodec;
      import org.apache.hadoop.hive.ql.io.orc.DataReader;
    +import org.apache.hadoop.hive.ql.io.orc.OrcConf;
      import org.apache.hadoop.hive.ql.io.orc.OrcProto;
      import org.apache.hadoop.hive.ql.io.orc.OutStream;
      import org.apache.hadoop.hive.ql.io.orc.RecordReaderUtils;
    @@ -751,7 +752,7 @@ class EncodedReaderImpl implements EncodedReader {

        /**
         * To achieve some sort of consistent cache boundaries, we will cache streams deterministically;
    - * in segments starting w/stream start, and going for either stream size or maximum allocation.
    + * in segments starting w/stream start, and going for either stream size or some fixed size.
         * If we are not reading the entire segment's worth of data, then we will not cache the partial
         * RGs; the breakage of cache assumptions (no interleaving blocks, etc.) is way too much PITA
         * to handle just for this case.
    @@ -777,87 +778,87 @@ class EncodedReaderImpl implements EncodedReader {
          }
          // Account for maximum cache buffer size.
          long streamLen = streamEnd - streamOffset;
    - int partSize = cache.getAllocator().getMaxAllocation(),
    - partCount = (int)((streamLen / partSize) + (((streamLen % partSize) != 0) ? 1 : 0));
    - long partOffset = streamOffset, partEnd = Math.min(partOffset + partSize, streamEnd);
    + int partSize = determineUncompressedPartSize(), //
    + partCount = (int)(streamLen / partSize) + (((streamLen % partSize) != 0) ? 1 : 0);

          CacheChunk lastUncompressed = null;
          MemoryBuffer[] singleAlloc = new MemoryBuffer[1];
    + /*
    +Starting pre-read for [12187411,17107411) at start: 12187411 end: 12449555 cache buffer: 0x5f64a8f6(2)
    +Processing uncompressed file data at [12187411, 12449555)
    + */
          for (int i = 0; i < partCount; ++i) {
    - long hasEntirePartTo = -1;
    - if (partOffset == current.getOffset()) {
    - hasEntirePartTo = partOffset;
    + long partOffset = streamOffset + (i * partSize),
    + partEnd = Math.min(partOffset + partSize, streamEnd);
    + long hasEntirePartTo = partOffset; // We have 0 bytes of data for this part, for now.
    + assert partOffset <= current.getOffset();
    + if (partOffset == current.getOffset() && current instanceof CacheChunk) {
              // We assume cache chunks would always match the way we read, so check and skip it.
    - if (current instanceof CacheChunk) {
    - lastUncompressed = (CacheChunk)current;
    - assert current.getOffset() == partOffset && current.getEnd() == partEnd;
    - partOffset = partEnd;
    - partEnd = Math.min(partOffset + partSize, streamEnd);
    - continue;
    - }
    + assert current.getOffset() == partOffset && current.getEnd() == partEnd;
    + lastUncompressed = (CacheChunk)current;
    + current = current.next;
    + continue;
            }
            if (current.getOffset() >= partEnd) {
    - // We have no data at all for this part of the stream (could be unneeded), skip.
    - partOffset = partEnd;
    - partEnd = Math.min(partOffset + partSize, streamEnd);
    - continue;
    + continue; // We have no data at all for this part of the stream (could be unneeded), skip.
            }
            if (toRelease == null && dataReader.isTrackingDiskRanges()) {
              toRelease = new ArrayList<ByteBuffer>();
            }
            // We have some disk buffers... see if we have entire part, etc.
    - UncompressedCacheChunk candidateCached = null;
    + UncompressedCacheChunk candidateCached = null; // We will cache if we have the entire part.
            DiskRangeList next = current;
            while (true) {
    - if (next == null || next.getOffset() >= partEnd) {
    - if (hasEntirePartTo < partEnd && candidateCached != null) {
    - // We are missing a section at the end of the part...
    - lastUncompressed = copyAndReplaceCandidateToNonCached(
    - candidateCached, partOffset, hasEntirePartTo, cache, singleAlloc);
    - candidateCached = null;
    - }
    - break;
    + boolean noMoreDataForPart = (next == null || next.getOffset() >= partEnd);
    + if (noMoreDataForPart && hasEntirePartTo < partEnd && candidateCached != null) {
    + // We are missing a section at the end of the part... copy the start to non-cached.
    + lastUncompressed = copyAndReplaceCandidateToNonCached(
    + candidateCached, partOffset, hasEntirePartTo, cache, singleAlloc);
    + candidateCached = null;
              }
              current = next;
    - boolean wasSplit = (current.getEnd() > partEnd);
    - if (wasSplit) {
    + if (noMoreDataForPart) break; // Done with this part.
    +
    + boolean wasSplit = false;
    + if (current.getEnd() > partEnd) {
    + // If the current buffer contains multiple parts, split it.
                current = current.split(partEnd);
    + wasSplit = true;
              }
              if (isDebugTracingEnabled) {
                LOG.info("Processing uncompressed file data at ["
                    + current.getOffset() + ", " + current.getEnd() + ")");
              }
    - BufferChunk bc = (BufferChunk)current;
    + BufferChunk curBc = (BufferChunk)current;
              if (!wasSplit && toRelease != null) {
    - toRelease.add(bc.getChunk()); // TODO: is it valid to give zcr the modified 2nd part?
    + toRelease.add(curBc.getChunk()); // TODO: is it valid to give zcr the modified 2nd part?
              }

              // Track if we still have the entire part.
              long hadEntirePartTo = hasEntirePartTo;
    - if (hasEntirePartTo != -1) {
    - hasEntirePartTo = (hasEntirePartTo == current.getOffset()) ? current.getEnd() : -1;
    - }
    - if (candidateCached != null && hasEntirePartTo == -1) {
    - lastUncompressed = copyAndReplaceCandidateToNonCached(
    - candidateCached, partOffset, hadEntirePartTo, cache, singleAlloc);
    - candidateCached = null;
    - }
    -
    - if (hasEntirePartTo != -1) {
    + // We have data until the end of current block if we had it until the beginning.
    + hasEntirePartTo = (hasEntirePartTo == current.getOffset()) ? current.getEnd() : -1;
    + if (hasEntirePartTo == -1) {
    + // We don't have the entire part; copy both whatever we intended to cache, and the rest,
    + // to an allocated buffer. We could try to optimize a bit if we have contiguous buffers
    + // with gaps, but it's probably not needed.
    + if (candidateCached != null) {
    + assert hadEntirePartTo != -1;
    + copyAndReplaceCandidateToNonCached(
    + candidateCached, partOffset, hadEntirePartTo, cache, singleAlloc);
    + candidateCached = null;
    + }
    + lastUncompressed = copyAndReplaceUncompressedToNonCached(curBc, cache, singleAlloc);
    + next = lastUncompressed.next; // There may be more data after the gap.
    + } else {
                // So far we have all the data from the beginning of the part.
                if (candidateCached == null) {
    - candidateCached = new UncompressedCacheChunk(bc);
    + candidateCached = new UncompressedCacheChunk(curBc);
                } else {
    - candidateCached.addChunk(bc);
    + candidateCached.addChunk(curBc);
                }
    - // We will take care of this at the end of the part, or if we find a gap.
                next = current.next;
    - continue;
              }
    - // We don't have the entire part; just copy to an allocated buffer. We could try to
    - // optimize a bit if we have contiguous buffers with gaps, but it's probably not needed.
    - lastUncompressed = copyAndReplaceUncompressedToNonCached(bc, cache, singleAlloc);
    - next = lastUncompressed.next;
            }
            if (candidateCached != null) {
              if (toCache == null) {
    @@ -908,6 +909,16 @@ class EncodedReaderImpl implements EncodedReader {
          return lastUncompressed;
        }

    +
    + private int determineUncompressedPartSize() {
    + // We will break the uncompressed data in the cache into chunks that are the size
    + // of the prevalent ORC compression buffer (the default), or the maximum allocation
    + // (since we cannot allocate bigger chunks), whichever is less.
    + long orcCbSizeDefault = ((Number)OrcConf.BUFFER_SIZE.getDefaultValue()).longValue();
    + int maxAllocSize = cache.getAllocator().getMaxAllocation();
    + return (int)Math.min(maxAllocSize, orcCbSizeDefault);
    + }
    +
        private static void copyUncompressedChunk(ByteBuffer src, ByteBuffer dest) {
          int startPos = dest.position(), startLim = dest.limit();
          dest.put(src); // Copy uncompressed data to cache.
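
    determineUncompressedPartSize above picks the smaller of the default ORC compression buffer size and the allocator's maximum allocation, and the caller then slices the stream into fixed-size parts of that size. A standalone sketch of that arithmetic, using hypothetical inputs (256Kb default buffer, 16Mb max allocation) and the offsets from the stray log comment earlier in the hunk:

    class PartSizingSketch {
      public static void main(String[] args) {
        long orcCbSizeDefault = 256 * 1024;   // stand-in for the OrcConf.BUFFER_SIZE default
        int maxAllocSize = 16 * 1024 * 1024;  // stand-in for cache.getAllocator().getMaxAllocation()
        int partSize = (int) Math.min(maxAllocSize, orcCbSizeDefault);

        long streamOffset = 12187411L, streamEnd = 12449555L; // offsets from the log comment above
        long streamLen = streamEnd - streamOffset;
        int partCount = (int) (streamLen / partSize) + (((streamLen % partSize) != 0) ? 1 : 0);

        for (int i = 0; i < partCount; ++i) {
          long partOffset = streamOffset + (long) i * partSize;
          long partEnd = Math.min(partOffset + partSize, streamEnd);
          // Prints a single part, [12187411, 12449555): exactly one 256Kb segment.
          System.out.println("part " + i + ": [" + partOffset + ", " + partEnd + ")");
        }
      }
    }
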
  • Jxiang at Nov 6, 2015 at 5:32 pm
    HIVE-12295 : change some logs from info to debug (Sergey Shelukhin, reviewed by Ashutosh Chauhan)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a46005cf
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a46005cf
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a46005cf

    Branch: refs/heads/master-fixed
    Commit: a46005cfb260fa1328a9c237796c1fa683a5c35a
    Parents: 6fda3b5
    Author: Sergey Shelukhin <sershe@apache.org>
    Authored: Mon Nov 2 13:07:04 2015 -0800
    Committer: Sergey Shelukhin <sershe@apache.org>
    Committed: Mon Nov 2 13:07:04 2015 -0800

    ----------------------------------------------------------------------
      .../llap/daemon/impl/ContainerRunnerImpl.java | 2 +-
      .../llap/daemon/impl/TaskExecutorService.java | 21 +++++-----
      .../llap/daemon/impl/TaskRunnerCallable.java | 4 +-
      .../llap/io/encoded/OrcEncodedDataReader.java | 4 +-
      .../llap/shufflehandler/ShuffleHandler.java | 13 +++---
      .../dag/app/rm/LlapTaskSchedulerService.java | 6 +--
      .../hive/metastore/AggregateStatsCache.java | 2 +-
      .../hadoop/hive/ql/exec/MapJoinOperator.java | 8 ++--
      .../hadoop/hive/ql/exec/MapredContext.java | 2 +-
      .../apache/hadoop/hive/ql/exec/Operator.java | 42 ++++++++++----------
      .../apache/hadoop/hive/ql/exec/Utilities.java | 6 +--
      .../hadoop/hive/ql/exec/mr/ObjectCache.java | 10 ++---
      .../hive/ql/exec/tez/LlapObjectCache.java | 18 +++++----
      .../hadoop/hive/ql/exec/tez/TezProcessor.java | 10 +++--
      .../ql/exec/vector/VectorGroupByOperator.java | 2 +-
      .../ql/exec/vector/VectorizationContext.java | 11 +++--
      .../ql/io/HiveContextAwareRecordReader.java | 2 +-
      .../hadoop/hive/ql/io/HiveInputFormat.java | 4 +-
      .../physical/NullScanTaskDispatcher.java | 4 +-
      .../hive/ql/optimizer/physical/Vectorizer.java | 11 +++--
      .../hadoop/hive/ql/ppd/OpProcFactory.java | 18 ++++++---
      21 files changed, 114 insertions(+), 86 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    index fad2d2c..4b28b53 100644
    --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
    @@ -191,7 +191,7 @@ public class ContainerRunnerImpl extends CompositeService implements ContainerRu

            Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);

    - LOG.info("DEBUG: Registering request with the ShuffleHandler");
    + LOG.debug("Registering request with the ShuffleHandler");
            ShuffleHandler.get()
                .registerDag(request.getApplicationIdString(), dagIdentifier, jobToken,
                    request.getUser(), localDirs);
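
    A common companion to demoting a call from info to debug is to guard any string concatenation with a level check. A generic illustration follows (not code from any file touched by this commit; the logger class and the dagIdentifier value are hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LogLevelSketch {
      private static final Logger LOG = LoggerFactory.getLogger(LogLevelSketch.class);

      public static void main(String[] args) {
        String dagIdentifier = "dag_0001"; // hypothetical value
        if (LOG.isDebugEnabled()) {
          LOG.debug("Registering request with the ShuffleHandler for " + dagIdentifier);
        }
      }
    }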

    http://git-wip-us.apache.org/repos/asf/hive/blob/a46005cf/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
    ----------------------------------------------------------------------
    diff --git a/llap-server/src