Hi,

I'm using the MapFileOutputFormat to look up values in MapFiles and keep
getting "Could not obtain block" errors. I'm thinking it might be because
ulimit is not set high enough. Has anyone else run into this issue?

attempt_201011180019_0005_m_000003_0: Caught exception while getting cached
files: java.io.IOException: Could not obtain block:
blk_-7027776556206952935_61338 file=/mydata/part-r-00000/data
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.hdfs.DFSClient$DFSInputStream.chooseDataNode(DFSClient.java:1976)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.hdfs.DFSClient$DFSInputStream.blockSeekTo(DFSClient.java:1783)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.hdfs.DFSClient$DFSInputStream.read(DFSClient.java:1931)
attempt_201011180019_0005_m_000003_0: at
java.io.DataInputStream.readFully(DataInputStream.java:178)
attempt_201011180019_0005_m_000003_0: at
java.io.DataInputStream.readFully(DataInputStream.java:152)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.io.SequenceFile$Reader.init(SequenceFile.java:1457)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.io.SequenceFile$Reader.<init>(SequenceFile.java:1435)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.io.SequenceFile$Reader.<init>(SequenceFile.java:1424)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.io.SequenceFile$Reader.<init>(SequenceFile.java:1419)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.io.MapFile$Reader.createDataFileReader(MapFile.java:302)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.io.MapFile$Reader.open(MapFile.java:284)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.io.MapFile$Reader.<init>(MapFile.java:273)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.io.MapFile$Reader.<init>(MapFile.java:260)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.io.MapFile$Reader.<init>(MapFile.java:253)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:144)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:639)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.mapred.MapTask.run(MapTask.java:315)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.mapred.Child$4.run(Child.java:217)
attempt_201011180019_0005_m_000003_0: at
java.security.AccessController.doPrivileged(Native Method)
attempt_201011180019_0005_m_000003_0: at
javax.security.auth.Subject.doAs(Subject.java:396)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1063)
attempt_201011180019_0005_m_000003_0: at
org.apache.hadoop.mapred.Child.main(Child.java:211)

-Kim
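
For context, a side-lookup against MapFile output usually boils down to something
like the sketch below. It uses the old-API org.apache.hadoop.mapred.MapFileOutputFormat
helpers; the /mydata path and the Text key/value types are placeholders, not taken from
the thread. Each MapFile.Reader that getReaders() opens reads the index and data file
headers over HDFS, which is the read that fails with "Could not obtain block" in the
trace above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.MapFile;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.Writable;
    import org.apache.hadoop.mapred.MapFileOutputFormat;
    import org.apache.hadoop.mapred.lib.HashPartitioner;

    public class MapFileLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Directory holding the MapFile part-* outputs (placeholder path).
        Path dir = new Path("/mydata");

        // One MapFile.Reader per part-*; opening each reader reads the
        // SequenceFile headers of its index and data files from HDFS.
        MapFile.Reader[] readers = MapFileOutputFormat.getReaders(fs, dir, conf);

        // Route the key to the partition the reduce that wrote it used.
        HashPartitioner<Text, Text> partitioner = new HashPartitioner<Text, Text>();
        Text key = new Text("some-key");  // hypothetical key
        Text value = new Text();
        Writable found = MapFileOutputFormat.getEntry(readers, partitioner, key, value);
        System.out.println(found == null ? "not found" : value.toString());

        for (MapFile.Reader reader : readers) {
          reader.close();
        }
      }
    }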


  • Jeff Bean at Nov 19, 2010 at 12:14 am
    Hi Kim,

    I saw this problem once; it turned out the block was getting deleted before
    it was read. Check the namenode for blk_-7027776556206952935_61338. What's
    the story there?

    Jeff
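
    One way to act on this suggestion, not spelled out in the thread, is to run
    fsck against the file named in the error and see whether that block is still
    listed and where its replicas live, and to grep the NameNode log for the block
    id to see when it was allocated or deleted (log path varies by install):

        hadoop fsck /mydata/part-r-00000/data -files -blocks -locations
        grep 7027776556206952935 <namenode log>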
  • Kim Vogt at Nov 19, 2010 at 12:27 am
    Hey Jeff,

    I'm not deleting any blocks, and "hadoop fsck" reports all the blocks as
    present and healthy :-/

    -Kim
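
    If the ulimit theory from the first post is worth ruling out, a quick check,
    not taken from the thread, is to look at the open-file limit for the user the
    DataNodes run as and raise it if it is still at a low default like 1024; the
    DataNode-side dfs.datanode.max.xcievers setting is often raised alongside it.
    The user name and value below are only examples:

        # on each datanode, as the user running the DataNode daemon
        ulimit -n

        # /etc/security/limits.conf (example user and value)
        hdfs  -  nofile  32768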

Discussion Overview
group: common-user
categories: hadoop
posted: Nov 18, '10 at 8:45p
active: Nov 19, '10 at 12:27a
posts: 3
users: 2
website: hadoop.apache.org...
irc: #hadoop

2 users in discussion
Kim Vogt: 2 posts; Jeff Bean: 1 post
