Repository: hive
Updated Branches:
   refs/heads/master 31cc894df -> 9a1f76928


HIVE-12501 : LLAP: don't use read(ByteBuffer) in IO (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9a1f7692
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9a1f7692
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9a1f7692

Branch: refs/heads/master
Commit: 9a1f76928900a6697d9efe74f28b7183d2979a1d
Parents: 31cc894
Author: Sergey Shelukhin <sershe@apache.org>
Authored: Wed Nov 25 17:25:06 2015 -0800
Committer: Sergey Shelukhin <sershe@apache.org>
Committed: Wed Nov 25 17:25:06 2015 -0800

----------------------------------------------------------------------
  .../hive/ql/io/orc/RecordReaderUtils.java | 43 +++++---------------
  1 file changed, 11 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9a1f7692/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
index 6f3a3e9..0caeb1b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
@@ -327,47 +327,26 @@ public class RecordReaderUtils {
            len -= read;
            off += read;
          }
-      } else if (doForceDirect) {
-        file.seek(base + off);
-        ByteBuffer directBuf = ByteBuffer.allocateDirect(len);
-        readDirect(file, len, directBuf);
-        range = range.replaceSelfWith(new BufferChunk(directBuf, range.getOffset()));
        } else {
+        // Don't use HDFS ByteBuffer API because it has no readFully, and is buggy and pointless.
          byte[] buffer = new byte[len];
          file.readFully((base + off), buffer, 0, buffer.length);
-        range = range.replaceSelfWith(new BufferChunk(ByteBuffer.wrap(buffer), range.getOffset()));
+        ByteBuffer bb = null;
+        if (doForceDirect) {
+          bb = ByteBuffer.allocateDirect(len);
+          bb.put(buffer);
+          bb.position(0);
+          bb.limit(len);
+        } else {
+          bb = ByteBuffer.wrap(buffer);
+        }
+        range = range.replaceSelfWith(new BufferChunk(bb, range.getOffset()));
        }
        range = range.next;
      }
      return prev.next;
    }
 
-  public static void readDirect(FSDataInputStream file,
-      int len, ByteBuffer directBuf) throws IOException {
-    // TODO: HDFS API is a mess, so handle all kinds of cases.
-    // Before 2.7, read() also doesn't adjust position correctly, so track it separately.
-    int pos = directBuf.position(), startPos = pos, endPos = pos + len;
-    try {
-      while (pos < endPos) {
-        int count = SHIMS.readByteBuffer(file, directBuf);
-        if (count < 0) throw new EOFException();
-        assert count != 0 : "0-length read: " + (endPos - pos) + "@" + (pos - startPos);
-        pos += count;
-        assert pos <= endPos : "Position " + pos + " > " + endPos + " after reading " + count;
-        directBuf.position(pos);
-      }
-    } catch (UnsupportedOperationException ex) {
-      assert pos == startPos;
-      // Happens in q files and such.
-      RecordReaderImpl.LOG.error("Stream does not support direct read; we will copy.");
-      byte[] buffer = new byte[len];
-      file.readFully(buffer, 0, buffer.length);
-      directBuf.put(buffer);
-    }
-    directBuf.position(startPos);
-    directBuf.limit(startPos + len);
-  }
-
 
    static List<DiskRange> getStreamBuffers(DiskRangeList range, long offset, long length) {
      // This assumes sorted ranges (as do many other parts of ORC code.

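For readers skimming the change: the deleted readDirect() helper looped over SHIMS.readByteBuffer(), tracking the buffer position manually (pre-2.7 read() did not adjust it correctly) and falling back to a copy for streams that throw UnsupportedOperationException. The replacement is a positioned readFully() into a heap array, copied into a direct buffer only when one is requested. Below is a minimal, self-contained sketch of that pattern; the class and method names are hypothetical, while FSDataInputStream.readFully and the java.nio calls are the real APIs the commit relies on.

----------------------------------------------------------------------
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.fs.FSDataInputStream;

public final class ReadFullySketch {
  // Hypothetical helper mirroring the pattern the commit adopts: a positioned
  // readFully into a heap array, then either wrapping the array or copying it
  // into a direct buffer when the caller requires one.
  static ByteBuffer readIntoBuffer(FSDataInputStream file, long position,
      int len, boolean forceDirect) throws IOException {
    byte[] buffer = new byte[len];
    // PositionedReadable#readFully retries internally until len bytes are
    // read, throwing EOFException otherwise, so no partial-read loop or
    // position bookkeeping is needed here.
    file.readFully(position, buffer, 0, len);
    if (!forceDirect) {
      return ByteBuffer.wrap(buffer);
    }
    ByteBuffer direct = ByteBuffer.allocateDirect(len);
    direct.put(buffer);
    direct.flip(); // position = 0, limit = len, same as position(0)/limit(len)
    return direct;
  }
}
----------------------------------------------------------------------

Compared to the deleted loop, this trades one extra copy in the direct-buffer case for independence from the stream's ByteBuffer-read support.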