FAQ
See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/463/>

------------------------------------------
[...truncated 754892 lines...]
[junit] 2010-10-22 00:48:03,367 WARN hdfs.DFSClient (DFSOutputStream.java:setupPipelineForAppendOrRecovery(776)) - Error Recovery for block blk_-863992521392524374_1001 in pipeline 127.0.0.1:51714, 127.0.0.1:59488, 127.0.0.1:35585: bad datanode 127.0.0.1:35585
[junit] 2010-10-22 00:48:03,367 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$8$9594fb70(205)) - FI: fiPipelineAck, datanode=DatanodeRegistration(127.0.0.1:51714, storageID=DS-1113396603-127.0.1.1-51714-1287708473055, infoPort=49069, ipcPort=48461)
[junit] 2010-10-22 00:48:03,368 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
[junit] java.io.IOException: Connection reset by peer
[junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
[junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
[junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
[junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
[junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
[junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
[junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
[junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
[junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
[junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
[junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
[junit] at java.lang.Thread.run(Thread.java:619)
[junit] 2010-10-22 00:48:03,368 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
[junit] java.io.IOException: Connection reset by peer
[junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
[junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
[junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
[junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
[junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
[junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
[junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
[junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
[junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
[junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
[junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
[junit] at java.lang.Thread.run(Thread.java:619)
[junit] 2010-10-22 00:48:03,369 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-863992521392524374_1001 2 Exception java.io.IOException: Connection reset by peer
[junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
[junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
[junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
[junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
[junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
[junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
[junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
[junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
[junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
[junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
[junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
[junit] at java.lang.Thread.run(Thread.java:619)
[junit]
[junit] 2010-10-22 00:48:03,369 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_-863992521392524374_1001 terminating
[junit] 2010-10-22 00:48:03,369 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:51714, storageID=DS-1113396603-127.0.1.1-51714-1287708473055, infoPort=49069, ipcPort=48461)
[junit] 2010-10-22 00:48:03,369 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
[junit] java.io.IOException: Connection reset by peer
[junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
[junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
[junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
[junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
[junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
[junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
[junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
[junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
[junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
[junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
[junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
[junit] at java.lang.Thread.run(Thread.java:619)
[junit] 2010-10-22 00:48:03,370 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:51714, storageID=DS-1113396603-127.0.1.1-51714-1287708473055, infoPort=49069, ipcPort=48461)
[junit] 2010-10-22 00:48:03,370 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
[junit] java.io.IOException: Connection reset by peer
[junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
[junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
[junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
[junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
[junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
[junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
[junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
[junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
[junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
[junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
[junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
[junit] at java.lang.Thread.run(Thread.java:619)
[junit] 2010-10-22 00:48:03,371 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-863992521392524374_1001 1 Exception java.io.IOException: Connection reset by peer
[junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
[junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
[junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
[junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
[junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
[junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
[junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
[junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
[junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
[junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
[junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
[junit] at java.lang.Thread.run(Thread.java:619)
[junit]
[junit] 2010-10-22 00:48:03,371 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_-863992521392524374_1001 terminating
[junit] 2010-10-22 00:48:03,372 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:59488, storageID=DS-1762502180-127.0.1.1-59488-1287708472800, infoPort=46435, ipcPort=53960)
[junit] 2010-10-22 00:48:03,372 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:59488, storageID=DS-1762502180-127.0.1.1-59488-1287708472800, infoPort=46435, ipcPort=53960)
[junit] 2010-10-22 00:48:03,373 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:51714
[junit] 2010-10-22 00:48:03,373 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
[junit] 2010-10-22 00:48:03,373 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-863992521392524374_1001 src: /127.0.0.1:55113 dest: /127.0.0.1:51714
[junit] 2010-10-22 00:48:03,373 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-863992521392524374_1001
[junit] 2010-10-22 00:48:03,374 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:59488
[junit] 2010-10-22 00:48:03,375 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
[junit] 2010-10-22 00:48:03,375 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-863992521392524374_1001 src: /127.0.0.1:60112 dest: /127.0.0.1:59488
[junit] 2010-10-22 00:48:03,375 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-863992521392524374_1001
[junit] 2010-10-22 00:48:03,376 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:51714
[junit] 2010-10-22 00:48:03,377 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-863992521392524374_1002 src: /127.0.0.1:55113 dest: /127.0.0.1:51714 of size 1
[junit] 2010-10-22 00:48:03,377 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:59488 is added to blk_-863992521392524374_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:51714|RBW], ReplicaUnderConstruction[127.0.0.1:59488|RBW], ReplicaUnderConstruction[127.0.0.1:35585|RBW]]} size 0
[junit] 2010-10-22 00:48:03,377 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-863992521392524374_1002 src: /127.0.0.1:60112 dest: /127.0.0.1:59488 of size 1
[junit] 2010-10-22 00:48:03,378 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4553)) - updatePipeline(block=blk_-863992521392524374_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:51714, 127.0.0.1:59488], clientName=DFSClient_342554748)
[junit] 2010-10-22 00:48:03,380 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4595)) - updatePipeline(blk_-863992521392524374_1001) successfully to blk_-863992521392524374_1002
[junit] 2010-10-22 00:48:03,381 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:51714 is added to blk_-863992521392524374_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:51714|RBW], ReplicaUnderConstruction[127.0.0.1:59488|RBW]]} size 1
[junit] 2010-10-22 00:48:03,384 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_342554748
[junit] 2010-10-22 00:48:03,387 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
[junit] 2010-10-22 00:48:03,388 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:51714
[junit] Shutting down the Mini HDFS Cluster
[junit] 2010-10-22 00:48:03,390 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:51714, dest: /127.0.0.1:55115, bytes: 5, op: HDFS_READ, cliID: DFSClient_342554748, offset: 0, srvID: DS-1113396603-127.0.1.1-51714-1287708473055, blockid: blk_-863992521392524374_1002, duration: 292944
[junit] 2010-10-22 00:48:03,390 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
[junit] 2010-10-22 00:48:03,390 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:51714
[junit] 2010-10-22 00:48:03,492 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 60696
[junit] 2010-10-22 00:48:03,492 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 60696: exiting
[junit] 2010-10-22 00:48:03,492 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 60696
[junit] 2010-10-22 00:48:03,493 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
[junit] 2010-10-22 00:48:03,493 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
[junit] 2010-10-22 00:48:03,493 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_-863992521392524374_1001 0 : Thread is interrupted.
[junit] 2010-10-22 00:48:03,493 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:35585, storageID=DS-421696816-127.0.1.1-35585-1287708473297, infoPort=60123, ipcPort=60696)
[junit] 2010-10-22 00:48:03,493 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_-863992521392524374_1001 terminating
[junit] 2010-10-22 00:48:03,493 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:35585, storageID=DS-421696816-127.0.1.1-35585-1287708473297, infoPort=60123, ipcPort=60696):DataXceiveServer: java.nio.channels.AsynchronousCloseException
[junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
[junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
[junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
[junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
[junit] at java.lang.Thread.run(Thread.java:619)
[junit]
[junit] 2010-10-22 00:48:03,494 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:35585, storageID=DS-421696816-127.0.1.1-35585-1287708473297, infoPort=60123, ipcPort=60696)
[junit] 2010-10-22 00:48:03,495 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:35585, storageID=DS-421696816-127.0.1.1-35585-1287708473297, infoPort=60123, ipcPort=60696):DataXceiver
[junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
[junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
[junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
[junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
[junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
[junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
[junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
[junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
[junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
[junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
[junit] at java.lang.Thread.run(Thread.java:619)
[junit] Caused by: java.lang.InterruptedException: sleep interrupted
[junit] at java.lang.Thread.sleep(Native Method)
[junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
[junit] ... 11 more
[junit] 2010-10-22 00:48:03,495 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
[junit] 2010-10-22 00:48:03,496 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
[junit] 2010-10-22 00:48:03,496 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:35585, storageID=DS-421696816-127.0.1.1-35585-1287708473297, infoPort=60123, ipcPort=60696):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
[junit] 2010-10-22 00:48:03,496 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 60696
[junit] 2010-10-22 00:48:03,497 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
[junit] 2010-10-22 00:48:03,497 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
[junit] 2010-10-22 00:48:03,497 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
[junit] 2010-10-22 00:48:03,498 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
[junit] 2010-10-22 00:48:03,498 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
[junit] 2010-10-22 00:48:03,600 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 48461
[junit] 2010-10-22 00:48:03,600 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 48461: exiting
[junit] 2010-10-22 00:48:03,600 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
[junit] 2010-10-22 00:48:03,600 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
[junit] 2010-10-22 00:48:03,601 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:51714, storageID=DS-1113396603-127.0.1.1-51714-1287708473055, infoPort=49069, ipcPort=48461):DataXceiveServer: java.nio.channels.AsynchronousCloseException
[junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
[junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
[junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
[junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
[junit] at java.lang.Thread.run(Thread.java:619)
[junit]
[junit] 2010-10-22 00:48:03,601 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 48461
[junit] 2010-10-22 00:48:03,603 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
[junit] 2010-10-22 00:48:03,603 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
[junit] 2010-10-22 00:48:03,604 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:51714, storageID=DS-1113396603-127.0.1.1-51714-1287708473055, infoPort=49069, ipcPort=48461):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
[junit] 2010-10-22 00:48:03,604 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 48461
[junit] 2010-10-22 00:48:03,604 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
[junit] 2010-10-22 00:48:03,605 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
[junit] 2010-10-22 00:48:03,605 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
[junit] 2010-10-22 00:48:03,605 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
[junit] 2010-10-22 00:48:03,605 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
[junit] 2010-10-22 00:48:03,607 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 53960
[junit] 2010-10-22 00:48:03,608 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 53960: exiting
[junit] 2010-10-22 00:48:03,608 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 53960
[junit] 2010-10-22 00:48:03,608 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:59488, storageID=DS-1762502180-127.0.1.1-59488-1287708472800, infoPort=46435, ipcPort=53960):DataXceiveServer: java.nio.channels.AsynchronousCloseException
[junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
[junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
[junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
[junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
[junit] at java.lang.Thread.run(Thread.java:619)
[junit]
[junit] 2010-10-22 00:48:03,608 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
[junit] 2010-10-22 00:48:03,608 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
[junit] 2010-10-22 00:48:03,709 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
[junit] 2010-10-22 00:48:03,709 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:59488, storageID=DS-1762502180-127.0.1.1-59488-1287708472800, infoPort=46435, ipcPort=53960):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
[junit] 2010-10-22 00:48:03,709 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 53960
[junit] 2010-10-22 00:48:03,709 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
[junit] 2010-10-22 00:48:03,710 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
[junit] 2010-10-22 00:48:03,710 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
[junit] 2010-10-22 00:48:03,710 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
[junit] 2010-10-22 00:48:03,812 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
[junit] 2010-10-22 00:48:03,812 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
[junit] 2010-10-22 00:48:03,812 INFO namenode.FSNamesystem (FSEditLog.java:printStatistics(1122)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 3
[junit] 2010-10-22 00:48:03,814 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 49576
[junit] 2010-10-22 00:48:03,814 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 49576: exiting
[junit] 2010-10-22 00:48:03,814 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 49576: exiting
[junit] 2010-10-22 00:48:03,815 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 49576: exiting
[junit] 2010-10-22 00:48:03,815 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 49576: exiting
[junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 88.475 sec
[junit] 2010-10-22 00:48:03,815 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 49576
[junit] 2010-10-22 00:48:03,815 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 49576: exiting
[junit] 2010-10-22 00:48:03,815 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 49576: exiting
[junit] 2010-10-22 00:48:03,815 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
[junit] 2010-10-22 00:48:03,815 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 49576: exiting
[junit] 2010-10-22 00:48:03,815 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 49576: exiting
[junit] 2010-10-22 00:48:03,815 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 49576: exiting
[junit] 2010-10-22 00:48:03,815 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 49576: exiting

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

Total time: 240 minutes 15 seconds
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure

Search Discussions

  • Apache Hudson Server at Oct 22, 2010 at 3:49 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/464/changes>

    Changes:

    [nigel] HADOOP-7003: New script for managing queue of pre-commit patches that need testing. Contributed by nigel.

    [cos] HDFS-1474. ant binary-system is broken. Contributed by Konstantin Boudnik

    ------------------------------------------
    [...truncated 754298 lines...]
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-22 15:47:31,487 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-22 15:47:31,488 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-5894423922388336159_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-22 15:47:31,489 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_-5894423922388336159_1001 terminating
    [junit] 2010-10-22 15:47:31,490 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:39698, storageID=DS-1461797149-127.0.1.1-39698-1287762441417, infoPort=35841, ipcPort=49962)
    [junit] 2010-10-22 15:47:31,490 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-22 15:47:31,490 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-22 15:47:31,490 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:39698, storageID=DS-1461797149-127.0.1.1-39698-1287762441417, infoPort=35841, ipcPort=49962)
    [junit] 2010-10-22 15:47:31,502 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:39698
    [junit] 2010-10-22 15:47:31,502 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-5894423922388336159_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-22 15:47:31,502 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_-5894423922388336159_1001 terminating
    [junit] 2010-10-22 15:47:31,502 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-22 15:47:31,503 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:55930, storageID=DS-660134414-127.0.1.1-55930-1287762440903, infoPort=33703, ipcPort=45302)
    [junit] 2010-10-22 15:47:31,504 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-5894423922388336159_1001 src: /127.0.0.1:59634 dest: /127.0.0.1:39698
    [junit] 2010-10-22 15:47:31,504 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-5894423922388336159_1001
    [junit] 2010-10-22 15:47:31,504 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:55930, storageID=DS-660134414-127.0.1.1-55930-1287762440903, infoPort=33703, ipcPort=45302)
    [junit] 2010-10-22 15:47:31,505 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:55930
    [junit] 2010-10-22 15:47:31,505 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-22 15:47:31,506 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-5894423922388336159_1001 src: /127.0.0.1:41909 dest: /127.0.0.1:55930
    [junit] 2010-10-22 15:47:31,506 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-5894423922388336159_1001
    [junit] 2010-10-22 15:47:31,506 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:39698
    [junit] 2010-10-22 15:47:31,507 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-5894423922388336159_1002 src: /127.0.0.1:41909 dest: /127.0.0.1:55930 of size 1
    [junit] 2010-10-22 15:47:31,508 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-5894423922388336159_1002 src: /127.0.0.1:59634 dest: /127.0.0.1:39698 of size 1
    [junit] 2010-10-22 15:47:31,508 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:55930 is added to blk_-5894423922388336159_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:39698|RBW], ReplicaUnderConstruction[127.0.0.1:55930|RBW], ReplicaUnderConstruction[127.0.0.1:53201|RBW]]} size 0
    [junit] 2010-10-22 15:47:31,509 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4553)) - updatePipeline(block=blk_-5894423922388336159_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:39698, 127.0.0.1:55930], clientName=DFSClient_-1430773085)
    [junit] 2010-10-22 15:47:31,512 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4595)) - updatePipeline(blk_-5894423922388336159_1001) successfully to blk_-5894423922388336159_1002
    [junit] 2010-10-22 15:47:31,512 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:39698 is added to blk_-5894423922388336159_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:39698|RBW], ReplicaUnderConstruction[127.0.0.1:55930|RBW]]} size 1
    [junit] 2010-10-22 15:47:31,515 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_-1430773085
    [junit] 2010-10-22 15:47:31,518 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-10-22 15:47:31,519 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:55930
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-10-22 15:47:31,521 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:55930, dest: /127.0.0.1:41910, bytes: 5, op: HDFS_READ, cliID: DFSClient_-1430773085, offset: 0, srvID: DS-660134414-127.0.1.1-55930-1287762440903, blockid: blk_-5894423922388336159_1002, duration: 277639
    [junit] 2010-10-22 15:47:31,521 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-10-22 15:47:31,521 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:55930
    [junit] 2010-10-22 15:47:31,623 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 49962
    [junit] 2010-10-22 15:47:31,623 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 49962: exiting
    [junit] 2010-10-22 15:47:31,624 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 49962
    [junit] 2010-10-22 15:47:31,624 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-22 15:47:31,624 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-22 15:47:31,624 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:39698, storageID=DS-1461797149-127.0.1.1-39698-1287762441417, infoPort=35841, ipcPort=49962):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-22 15:47:31,626 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-22 15:47:31,627 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-22 15:47:31,627 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:39698, storageID=DS-1461797149-127.0.1.1-39698-1287762441417, infoPort=35841, ipcPort=49962):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-10-22 15:47:31,627 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 49962
    [junit] 2010-10-22 15:47:31,628 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-22 15:47:31,628 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-22 15:47:31,628 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-22 15:47:31,629 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-22 15:47:31,629 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-10-22 15:47:31,731 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 34347
    [junit] 2010-10-22 15:47:31,731 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 34347: exiting
    [junit] 2010-10-22 15:47:31,731 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 34347
    [junit] 2010-10-22 15:47:31,732 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:53201, storageID=DS-977604701-127.0.1.1-53201-1287762441155, infoPort=58095, ipcPort=34347)
    [junit] 2010-10-22 15:47:31,732 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-22 15:47:31,732 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:53201, storageID=DS-977604701-127.0.1.1-53201-1287762441155, infoPort=58095, ipcPort=34347):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-22 15:47:31,732 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_-5894423922388336159_1001 0 : Thread is interrupted.
    [junit] 2010-10-22 15:47:31,732 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-10-22 15:47:31,732 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_-5894423922388336159_1001 terminating
    [junit] 2010-10-22 15:47:31,732 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:53201, storageID=DS-977604701-127.0.1.1-53201-1287762441155, infoPort=58095, ipcPort=34347)
    [junit] 2010-10-22 15:47:31,733 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:53201, storageID=DS-977604701-127.0.1.1-53201-1287762441155, infoPort=58095, ipcPort=34347):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-10-22 15:47:31,735 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-22 15:47:31,735 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-22 15:47:31,736 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:53201, storageID=DS-977604701-127.0.1.1-53201-1287762441155, infoPort=58095, ipcPort=34347):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-10-22 15:47:31,736 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 34347
    [junit] 2010-10-22 15:47:31,736 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-22 15:47:31,736 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-22 15:47:31,737 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-22 15:47:31,737 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-22 15:47:31,737 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-10-22 15:47:31,847 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 45302
    [junit] 2010-10-22 15:47:31,848 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 45302: exiting
    [junit] 2010-10-22 15:47:31,848 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 45302
    [junit] 2010-10-22 15:47:31,848 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:55930, storageID=DS-660134414-127.0.1.1-55930-1287762440903, infoPort=33703, ipcPort=45302):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-22 15:47:31,848 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-22 15:47:31,848 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-22 15:47:31,939 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-22 15:47:31,949 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:55930, storageID=DS-660134414-127.0.1.1-55930-1287762440903, infoPort=33703, ipcPort=45302):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-10-22 15:47:31,949 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 45302
    [junit] 2010-10-22 15:47:31,950 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-22 15:47:31,950 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-22 15:47:31,950 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-22 15:47:31,950 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-22 15:47:32,053 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-22 15:47:32,053 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-22 15:47:32,053 INFO namenode.FSNamesystem (FSEditLog.java:printStatistics(1122)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 3 3
    [junit] 2010-10-22 15:47:32,054 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 43628
    [junit] 2010-10-22 15:47:32,055 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 43628: exiting
    [junit] 2010-10-22 15:47:32,055 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-22 15:47:32,056 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 43628: exiting
    [junit] 2010-10-22 15:47:32,056 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 43628: exiting
    [junit] 2010-10-22 15:47:32,055 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 43628: exiting
    [junit] 2010-10-22 15:47:32,055 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 43628
    [junit] 2010-10-22 15:47:32,055 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 43628: exiting
    [junit] 2010-10-22 15:47:32,055 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 43628: exiting
    [junit] 2010-10-22 15:47:32,055 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 43628: exiting
    [junit] 2010-10-22 15:47:32,055 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 43628: exiting
    [junit] 2010-10-22 15:47:32,055 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 43628: exiting
    [junit] 2010-10-22 15:47:32,055 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 43628: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 92.538 sec

    checkfailure:

    run-test-hdfs-all-withtestcaseonly:

    run-test-hdfs:

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

    Total time: 254 minutes 12 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Oct 23, 2010 at 3:44 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/465/changes>

    Changes:

    [gkesavan] Fix test-patch with the new hudson url

    ------------------------------------------
    [...truncated 753894 lines...]
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-23 15:44:33,930 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-23 15:44:33,931 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-5675794460184014261_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-23 15:44:33,932 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_-5675794460184014261_1001 terminating
    [junit] 2010-10-23 15:44:33,932 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:36743, storageID=DS-1415184625-127.0.1.1-36743-1287848663350, infoPort=33009, ipcPort=52015)
    [junit] 2010-10-23 15:44:33,932 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-23 15:44:33,935 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-23 15:44:33,935 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:36743, storageID=DS-1415184625-127.0.1.1-36743-1287848663350, infoPort=33009, ipcPort=52015)
    [junit] 2010-10-23 15:44:33,937 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-5675794460184014261_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-23 15:44:33,937 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_-5675794460184014261_1001 terminating
    [junit] 2010-10-23 15:44:33,937 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:44778, storageID=DS-1366290382-127.0.1.1-44778-1287848663868, infoPort=37729, ipcPort=57162)
    [junit] 2010-10-23 15:44:33,938 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:44778, storageID=DS-1366290382-127.0.1.1-44778-1287848663868, infoPort=37729, ipcPort=57162)
    [junit] 2010-10-23 15:44:33,938 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:36743
    [junit] 2010-10-23 15:44:33,938 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-23 15:44:33,939 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-5675794460184014261_1001 src: /127.0.0.1:42989 dest: /127.0.0.1:36743
    [junit] 2010-10-23 15:44:33,939 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-5675794460184014261_1001
    [junit] 2010-10-23 15:44:33,940 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:44778
    [junit] 2010-10-23 15:44:33,940 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-23 15:44:33,940 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-5675794460184014261_1001 src: /127.0.0.1:56246 dest: /127.0.0.1:44778
    [junit] 2010-10-23 15:44:33,940 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-5675794460184014261_1001
    [junit] 2010-10-23 15:44:33,941 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:36743
    [junit] 2010-10-23 15:44:33,941 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-5675794460184014261_1002 src: /127.0.0.1:56246 dest: /127.0.0.1:44778 of size 1
    [junit] 2010-10-23 15:44:33,942 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-5675794460184014261_1002 src: /127.0.0.1:42989 dest: /127.0.0.1:36743 of size 1
    [junit] 2010-10-23 15:44:33,943 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:44778 is added to blk_-5675794460184014261_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:36743|RBW], ReplicaUnderConstruction[127.0.0.1:44778|RBW], ReplicaUnderConstruction[127.0.0.1:38708|RBW]]} size 0
    [junit] 2010-10-23 15:44:33,943 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4553)) - updatePipeline(block=blk_-5675794460184014261_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:36743, 127.0.0.1:44778], clientName=DFSClient_-1934085856)
    [junit] 2010-10-23 15:44:33,945 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4595)) - updatePipeline(blk_-5675794460184014261_1001) successfully to blk_-5675794460184014261_1002
    [junit] 2010-10-23 15:44:33,946 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:36743 is added to blk_-5675794460184014261_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:36743|RBW], ReplicaUnderConstruction[127.0.0.1:44778|RBW]]} size 1
    [junit] 2010-10-23 15:44:33,948 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_-1934085856
    [junit] 2010-10-23 15:44:33,951 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-10-23 15:44:33,953 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:44778
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-10-23 15:44:33,954 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:44778, dest: /127.0.0.1:56247, bytes: 5, op: HDFS_READ, cliID: DFSClient_-1934085856, offset: 0, srvID: DS-1366290382-127.0.1.1-44778-1287848663868, blockid: blk_-5675794460184014261_1002, duration: 263507
    [junit] 2010-10-23 15:44:33,954 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-10-23 15:44:33,954 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:44778
    [junit] 2010-10-23 15:44:34,056 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 57162
    [junit] 2010-10-23 15:44:34,057 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 57162: exiting
    [junit] 2010-10-23 15:44:34,057 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-23 15:44:34,057 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 57162
    [junit] 2010-10-23 15:44:34,057 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:44778, storageID=DS-1366290382-127.0.1.1-44778-1287848663868, infoPort=37729, ipcPort=57162):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-23 15:44:34,057 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-23 15:44:34,059 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-23 15:44:34,059 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:44778, storageID=DS-1366290382-127.0.1.1-44778-1287848663868, infoPort=37729, ipcPort=57162):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-10-23 15:44:34,059 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 57162
    [junit] 2010-10-23 15:44:34,059 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-23 15:44:34,060 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-23 15:44:34,060 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-23 15:44:34,060 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-23 15:44:34,061 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-10-23 15:44:34,162 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 46715
    [junit] 2010-10-23 15:44:34,163 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 46715: exiting
    [junit] 2010-10-23 15:44:34,163 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-23 15:44:34,163 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-10-23 15:44:34,163 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:38708, storageID=DS-1707857160-127.0.1.1-38708-1287848663608, infoPort=57318, ipcPort=46715):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-23 15:44:34,163 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:38708, storageID=DS-1707857160-127.0.1.1-38708-1287848663608, infoPort=57318, ipcPort=46715)
    [junit] 2010-10-23 15:44:34,163 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 46715
    [junit] 2010-10-23 15:44:34,163 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_-5675794460184014261_1001 0 : Thread is interrupted.
    [junit] 2010-10-23 15:44:34,164 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:38708, storageID=DS-1707857160-127.0.1.1-38708-1287848663608, infoPort=57318, ipcPort=46715)
    [junit] 2010-10-23 15:44:34,165 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_-5675794460184014261_1001 terminating
    [junit] 2010-10-23 15:44:34,165 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:38708, storageID=DS-1707857160-127.0.1.1-38708-1287848663608, infoPort=57318, ipcPort=46715):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-10-23 15:44:34,166 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-23 15:44:34,166 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-23 15:44:34,167 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:38708, storageID=DS-1707857160-127.0.1.1-38708-1287848663608, infoPort=57318, ipcPort=46715):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-10-23 15:44:34,167 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 46715
    [junit] 2010-10-23 15:44:34,167 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-23 15:44:34,167 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-23 15:44:34,168 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-23 15:44:34,168 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-23 15:44:34,168 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-10-23 15:44:34,270 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 52015
    [junit] 2010-10-23 15:44:34,270 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 52015: exiting
    [junit] 2010-10-23 15:44:34,270 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-23 15:44:34,270 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-23 15:44:34,271 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:36743, storageID=DS-1415184625-127.0.1.1-36743-1287848663350, infoPort=33009, ipcPort=52015):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-23 15:44:34,270 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 52015
    [junit] 2010-10-23 15:44:34,273 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-23 15:44:34,374 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-23 15:44:34,374 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:36743, storageID=DS-1415184625-127.0.1.1-36743-1287848663350, infoPort=33009, ipcPort=52015):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-10-23 15:44:34,374 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 52015
    [junit] 2010-10-23 15:44:34,375 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-23 15:44:34,375 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-23 15:44:34,375 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-23 15:44:34,389 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-23 15:44:34,491 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-23 15:44:34,491 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-23 15:44:34,491 INFO namenode.FSNamesystem (FSEditLog.java:printStatistics(1122)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 2
    [junit] 2010-10-23 15:44:34,493 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 38392
    [junit] 2010-10-23 15:44:34,493 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 38392: exiting
    [junit] 2010-10-23 15:44:34,493 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 38392: exiting
    [junit] 2010-10-23 15:44:34,493 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 38392: exiting
    [junit] 2010-10-23 15:44:34,494 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 38392: exiting
    [junit] 2010-10-23 15:44:34,494 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 38392: exiting
    [junit] 2010-10-23 15:44:34,494 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 38392: exiting
    [junit] 2010-10-23 15:44:34,494 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 38392: exiting
    [junit] 2010-10-23 15:44:34,494 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 38392: exiting
    [junit] 2010-10-23 15:44:34,494 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 38392
    [junit] 2010-10-23 15:44:34,494 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-23 15:44:34,494 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 38392: exiting
    [junit] 2010-10-23 15:44:34,494 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 38392: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 75.521 sec

    checkfailure:

    run-test-hdfs-all-withtestcaseonly:

    run-test-hdfs:

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

    Total time: 251 minutes 9 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Oct 24, 2010 at 3:03 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/466/changes>

    Changes:

    [dhruba] HDFS-1472. Allow programmatic access to fsck output.
    (Ramkumar Vadali via dhruba)

    [hairong] HDFS-1435. Provide an option to store fsimage compressed. Contributed by Hairong Kuang.

    ------------------------------------------
    [...truncated 662279 lines...]
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-24 15:03:16,514 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-24 15:03:16,516 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_3342812041894085268_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-24 15:03:16,516 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_3342812041894085268_1001 terminating
    [junit] 2010-10-24 15:03:16,517 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:60696, storageID=DS-395159725-127.0.1.1-60696-1287932586100, infoPort=35570, ipcPort=34653)
    [junit] 2010-10-24 15:03:16,518 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-24 15:03:16,518 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:60696, storageID=DS-395159725-127.0.1.1-60696-1287932586100, infoPort=35570, ipcPort=34653)
    [junit] 2010-10-24 15:03:16,518 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:60696
    [junit] 2010-10-24 15:03:16,519 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-24 15:03:16,518 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-24 15:03:16,520 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_3342812041894085268_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-24 15:03:16,520 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_3342812041894085268_1001 terminating
    [junit] 2010-10-24 15:03:16,520 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:52131, storageID=DS-1245345416-127.0.1.1-52131-1287932586360, infoPort=47937, ipcPort=52333)
    [junit] 2010-10-24 15:03:16,521 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:52131, storageID=DS-1245345416-127.0.1.1-52131-1287932586360, infoPort=47937, ipcPort=52333)
    [junit] 2010-10-24 15:03:16,519 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_3342812041894085268_1001 src: /127.0.0.1:52684 dest: /127.0.0.1:60696
    [junit] 2010-10-24 15:03:16,521 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_3342812041894085268_1001
    [junit] 2010-10-24 15:03:16,522 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:52131
    [junit] 2010-10-24 15:03:16,523 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-24 15:03:16,523 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_3342812041894085268_1001 src: /127.0.0.1:56638 dest: /127.0.0.1:52131
    [junit] 2010-10-24 15:03:16,523 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_3342812041894085268_1001
    [junit] 2010-10-24 15:03:16,524 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:60696
    [junit] 2010-10-24 15:03:16,525 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_3342812041894085268_1002 src: /127.0.0.1:52684 dest: /127.0.0.1:60696 of size 1
    [junit] 2010-10-24 15:03:16,525 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:52131 is added to blk_3342812041894085268_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:60696|RBW], ReplicaUnderConstruction[127.0.0.1:52131|RBW], ReplicaUnderConstruction[127.0.0.1:38718|RBW]]} size 0
    [junit] 2010-10-24 15:03:16,526 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_3342812041894085268_1002 src: /127.0.0.1:56638 dest: /127.0.0.1:52131 of size 1
    [junit] 2010-10-24 15:03:16,526 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4553)) - updatePipeline(block=blk_3342812041894085268_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:60696, 127.0.0.1:52131], clientName=DFSClient_353851221)
    [junit] 2010-10-24 15:03:16,529 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4595)) - updatePipeline(blk_3342812041894085268_1001) successfully to blk_3342812041894085268_1002
    [junit] 2010-10-24 15:03:16,529 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:60696 is added to blk_3342812041894085268_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:60696|RBW], ReplicaUnderConstruction[127.0.0.1:52131|RBW]]} size 1
    [junit] 2010-10-24 15:03:16,532 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_353851221
    [junit] 2010-10-24 15:03:16,535 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-10-24 15:03:16,536 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:52131
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-10-24 15:03:16,538 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:52131, dest: /127.0.0.1:56639, bytes: 5, op: HDFS_READ, cliID: DFSClient_353851221, offset: 0, srvID: DS-1245345416-127.0.1.1-52131-1287932586360, blockid: blk_3342812041894085268_1002, duration: 267409
    [junit] 2010-10-24 15:03:16,538 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-10-24 15:03:16,538 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:52131
    [junit] 2010-10-24 15:03:16,640 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 52333
    [junit] 2010-10-24 15:03:16,640 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 52333: exiting
    [junit] 2010-10-24 15:03:16,641 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-24 15:03:16,641 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:52131, storageID=DS-1245345416-127.0.1.1-52131-1287932586360, infoPort=47937, ipcPort=52333):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-24 15:03:16,641 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 52333
    [junit] 2010-10-24 15:03:16,642 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-24 15:03:16,642 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-24 15:03:16,643 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:52131, storageID=DS-1245345416-127.0.1.1-52131-1287932586360, infoPort=47937, ipcPort=52333):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-10-24 15:03:16,643 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 52333
    [junit] 2010-10-24 15:03:16,643 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-24 15:03:16,643 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-24 15:03:16,643 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-24 15:03:16,644 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-24 15:03:16,644 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-10-24 15:03:16,746 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 34653
    [junit] 2010-10-24 15:03:16,746 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 34653: exiting
    [junit] 2010-10-24 15:03:16,746 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 34653
    [junit] 2010-10-24 15:03:16,747 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:60696, storageID=DS-395159725-127.0.1.1-60696-1287932586100, infoPort=35570, ipcPort=34653):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-24 15:03:16,746 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-24 15:03:16,746 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-24 15:03:16,748 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-24 15:03:16,748 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:60696, storageID=DS-395159725-127.0.1.1-60696-1287932586100, infoPort=35570, ipcPort=34653):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-10-24 15:03:16,748 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 34653
    [junit] 2010-10-24 15:03:16,749 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-24 15:03:16,749 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-24 15:03:16,749 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-24 15:03:16,750 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-24 15:03:16,750 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-10-24 15:03:16,851 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 45007
    [junit] 2010-10-24 15:03:16,852 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 45007: exiting
    [junit] 2010-10-24 15:03:16,852 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-24 15:03:16,852 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-10-24 15:03:16,852 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 45007
    [junit] 2010-10-24 15:03:16,852 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:38718, storageID=DS-1807545415-127.0.1.1-38718-1287932585706, infoPort=41545, ipcPort=45007)
    [junit] 2010-10-24 15:03:16,852 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_3342812041894085268_1001 0 : Thread is interrupted.
    [junit] 2010-10-24 15:03:16,852 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:38718, storageID=DS-1807545415-127.0.1.1-38718-1287932585706, infoPort=41545, ipcPort=45007):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-24 15:03:16,853 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:38718, storageID=DS-1807545415-127.0.1.1-38718-1287932585706, infoPort=41545, ipcPort=45007)
    [junit] 2010-10-24 15:03:16,853 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_3342812041894085268_1001 terminating
    [junit] 2010-10-24 15:03:16,854 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:38718, storageID=DS-1807545415-127.0.1.1-38718-1287932585706, infoPort=41545, ipcPort=45007):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-10-24 15:03:16,855 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-24 15:03:16,889 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-24 15:03:16,955 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:38718, storageID=DS-1807545415-127.0.1.1-38718-1287932585706, infoPort=41545, ipcPort=45007):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-10-24 15:03:16,955 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 45007
    [junit] 2010-10-24 15:03:16,956 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-24 15:03:16,956 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-24 15:03:16,956 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-24 15:03:16,957 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-24 15:03:17,058 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-24 15:03:17,059 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-24 15:03:17,059 INFO namenode.FSNamesystem (FSEditLog.java:printStatistics(1122)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 3
    [junit] 2010-10-24 15:03:17,060 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 56215
    [junit] 2010-10-24 15:03:17,061 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 56215: exiting
    [junit] 2010-10-24 15:03:17,061 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 56215: exiting
    [junit] 2010-10-24 15:03:17,061 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 56215: exiting
    [junit] 2010-10-24 15:03:17,062 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 56215: exiting
    [junit] 2010-10-24 15:03:17,062 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 56215: exiting
    [junit] 2010-10-24 15:03:17,061 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 56215: exiting
    [junit] 2010-10-24 15:03:17,061 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 56215: exiting
    [junit] 2010-10-24 15:03:17,061 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 56215: exiting
    [junit] 2010-10-24 15:03:17,061 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 56215
    [junit] 2010-10-24 15:03:17,061 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 56215: exiting
    [junit] 2010-10-24 15:03:17,061 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-24 15:03:17,062 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 56215: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 71.849 sec

    checkfailure:
    [touch] Creating <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/testsfailed>

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:706: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:473: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/src/test/aop/build/aop.xml>:230: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:664: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:621: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:689: Tests failed!

    Total time: 209 minutes 47 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Oct 25, 2010 at 3:40 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/467/>

    ------------------------------------------
    [...truncated 762604 lines...]
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-25 15:39:45,238 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-25 15:39:45,240 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-4857830530774560018_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-25 15:39:45,241 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_-4857830530774560018_1001 terminating
    [junit] 2010-10-25 15:39:45,241 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:43749
    [junit] 2010-10-25 15:39:45,241 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-25 15:39:45,241 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-25 15:39:45,241 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:43749, storageID=DS-1462025912-127.0.1.1-43749-1288021174904, infoPort=56217, ipcPort=53625)
    [junit] 2010-10-25 15:39:45,242 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:43749, storageID=DS-1462025912-127.0.1.1-43749-1288021174904, infoPort=56217, ipcPort=53625)
    [junit] 2010-10-25 15:39:45,242 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-4857830530774560018_1001 src: /127.0.0.1:37619 dest: /127.0.0.1:43749
    [junit] 2010-10-25 15:39:45,242 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-25 15:39:45,243 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-4857830530774560018_1001
    [junit] 2010-10-25 15:39:45,243 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-4857830530774560018_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-25 15:39:45,244 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_-4857830530774560018_1001 terminating
    [junit] 2010-10-25 15:39:45,245 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:55626, storageID=DS-222547800-127.0.1.1-55626-1288021175168, infoPort=48776, ipcPort=46110)
    [junit] 2010-10-25 15:39:45,245 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:55626, storageID=DS-222547800-127.0.1.1-55626-1288021175168, infoPort=48776, ipcPort=46110)
    [junit] 2010-10-25 15:39:45,246 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:55626
    [junit] 2010-10-25 15:39:45,246 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-25 15:39:45,246 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-4857830530774560018_1001 src: /127.0.0.1:40979 dest: /127.0.0.1:55626
    [junit] 2010-10-25 15:39:45,246 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-4857830530774560018_1001
    [junit] 2010-10-25 15:39:45,247 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:43749
    [junit] 2010-10-25 15:39:45,247 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-4857830530774560018_1002 src: /127.0.0.1:40979 dest: /127.0.0.1:55626 of size 1
    [junit] 2010-10-25 15:39:45,248 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-4857830530774560018_1002 src: /127.0.0.1:37619 dest: /127.0.0.1:43749 of size 1
    [junit] 2010-10-25 15:39:45,249 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:55626 is added to blk_-4857830530774560018_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:43749|RBW], ReplicaUnderConstruction[127.0.0.1:55626|RBW], ReplicaUnderConstruction[127.0.0.1:35754|RBW]]} size 0
    [junit] 2010-10-25 15:39:45,249 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4553)) - updatePipeline(block=blk_-4857830530774560018_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:43749, 127.0.0.1:55626], clientName=DFSClient_-739674863)
    [junit] 2010-10-25 15:39:45,251 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4595)) - updatePipeline(blk_-4857830530774560018_1001) successfully to blk_-4857830530774560018_1002
    [junit] 2010-10-25 15:39:45,252 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:43749 is added to blk_-4857830530774560018_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:43749|RBW], ReplicaUnderConstruction[127.0.0.1:55626|RBW]]} size 1
    [junit] 2010-10-25 15:39:45,254 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_-739674863
    [junit] 2010-10-25 15:39:45,257 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-10-25 15:39:45,258 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:43749
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-10-25 15:39:45,259 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:43749, dest: /127.0.0.1:37621, bytes: 5, op: HDFS_READ, cliID: DFSClient_-739674863, offset: 0, srvID: DS-1462025912-127.0.1.1-43749-1288021174904, blockid: blk_-4857830530774560018_1002, duration: 263389
    [junit] 2010-10-25 15:39:45,260 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-10-25 15:39:45,260 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:43749
    [junit] 2010-10-25 15:39:45,362 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 46110
    [junit] 2010-10-25 15:39:45,362 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 46110: exiting
    [junit] 2010-10-25 15:39:45,362 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 46110
    [junit] 2010-10-25 15:39:45,362 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-25 15:39:45,362 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:55626, storageID=DS-222547800-127.0.1.1-55626-1288021175168, infoPort=48776, ipcPort=46110):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-25 15:39:45,362 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-25 15:39:45,365 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-25 15:39:45,365 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-25 15:39:45,366 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:55626, storageID=DS-222547800-127.0.1.1-55626-1288021175168, infoPort=48776, ipcPort=46110):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-10-25 15:39:45,366 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 46110
    [junit] 2010-10-25 15:39:45,366 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-25 15:39:45,366 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-25 15:39:45,366 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-25 15:39:45,367 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-25 15:39:45,367 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-10-25 15:39:45,469 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 53625
    [junit] 2010-10-25 15:39:45,469 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 53625: exiting
    [junit] 2010-10-25 15:39:45,469 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 53625
    [junit] 2010-10-25 15:39:45,470 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-25 15:39:45,470 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:43749, storageID=DS-1462025912-127.0.1.1-43749-1288021174904, infoPort=56217, ipcPort=53625):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-25 15:39:45,471 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-25 15:39:45,471 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-25 15:39:45,472 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:43749, storageID=DS-1462025912-127.0.1.1-43749-1288021174904, infoPort=56217, ipcPort=53625):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-10-25 15:39:45,472 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 53625
    [junit] 2010-10-25 15:39:45,472 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-25 15:39:45,472 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-25 15:39:45,472 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-25 15:39:45,473 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-25 15:39:45,473 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-10-25 15:39:45,575 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 55939
    [junit] 2010-10-25 15:39:45,575 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 55939: exiting
    [junit] 2010-10-25 15:39:45,575 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-25 15:39:45,575 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 55939
    [junit] 2010-10-25 15:39:45,576 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_-4857830530774560018_1001 0 : Thread is interrupted.
    [junit] 2010-10-25 15:39:45,576 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:35754, storageID=DS-1897299291-127.0.1.1-35754-1288021174639, infoPort=34672, ipcPort=55939)
    [junit] 2010-10-25 15:39:45,575 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:35754, storageID=DS-1897299291-127.0.1.1-35754-1288021174639, infoPort=34672, ipcPort=55939):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-25 15:39:45,575 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-10-25 15:39:45,576 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:35754, storageID=DS-1897299291-127.0.1.1-35754-1288021174639, infoPort=34672, ipcPort=55939)
    [junit] 2010-10-25 15:39:45,576 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_-4857830530774560018_1001 terminating
    [junit] 2010-10-25 15:39:45,577 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:35754, storageID=DS-1897299291-127.0.1.1-35754-1288021174639, infoPort=34672, ipcPort=55939):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-10-25 15:39:45,579 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-25 15:39:45,668 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-25 15:39:45,679 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:35754, storageID=DS-1897299291-127.0.1.1-35754-1288021174639, infoPort=34672, ipcPort=55939):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-10-25 15:39:45,679 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 55939
    [junit] 2010-10-25 15:39:45,680 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-25 15:39:45,680 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-25 15:39:45,680 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-25 15:39:45,681 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-25 15:39:45,782 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-25 15:39:45,782 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-25 15:39:45,783 INFO namenode.FSNamesystem (FSEditLog.java:printStatistics(1122)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 2
    [junit] 2010-10-25 15:39:45,784 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 47951
    [junit] 2010-10-25 15:39:45,785 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 47951: exiting
    [junit] 2010-10-25 15:39:45,785 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 47951: exiting
    [junit] 2010-10-25 15:39:45,785 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 47951: exiting
    [junit] 2010-10-25 15:39:45,785 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 47951: exiting
    [junit] 2010-10-25 15:39:45,785 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 47951: exiting
    [junit] 2010-10-25 15:39:45,785 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 47951: exiting
    [junit] 2010-10-25 15:39:45,785 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 47951: exiting
    [junit] 2010-10-25 15:39:45,785 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 47951: exiting
    [junit] 2010-10-25 15:39:45,786 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 47951: exiting
    [junit] 2010-10-25 15:39:45,786 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 47951
    [junit] 2010-10-25 15:39:45,785 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 47951: exiting
    [junit] 2010-10-25 15:39:45,785 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 91.716 sec

    checkfailure:

    run-test-hdfs-all-withtestcaseonly:

    run-test-hdfs:

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

    Total time: 246 minutes 17 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Error updating JIRA issues. Saving issues for next build.
    java.net.ConnectException: Connection timed out
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Oct 26, 2010 at 2:54 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/468/changes>

    Changes:

    [gkesavan] HADOOP-7007: Update the hudson-test-patch ant target. Contributed by Giridharan Kesavan.

    [gkesavan] HADOOP-7007: Update the hudson-test-patch ant target. Contributed by Giridharan Kesavan.

    [nigel] HADOOP-7005: Update test-patch.sh to remove callback to Hudson master. Contributed by nigel.

    ------------------------------------------
    [...truncated 689187 lines...]
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-26 14:54:39,072 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-26 14:54:39,074 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-3200216660841758636_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-26 14:54:39,074 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_-3200216660841758636_1001 terminating
    [junit] 2010-10-26 14:54:39,075 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:52042, storageID=DS-1150598151-127.0.1.1-52042-1288104868905, infoPort=37934, ipcPort=53310)
    [junit] 2010-10-26 14:54:39,075 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-26 14:54:39,076 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-26 14:54:39,076 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-3200216660841758636_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-26 14:54:39,076 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_-3200216660841758636_1001 terminating
    [junit] 2010-10-26 14:54:39,075 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:52042, storageID=DS-1150598151-127.0.1.1-52042-1288104868905, infoPort=37934, ipcPort=53310)
    [junit] 2010-10-26 14:54:39,077 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:44602, storageID=DS-610067579-127.0.1.1-44602-1288104868380, infoPort=42123, ipcPort=38023)
    [junit] 2010-10-26 14:54:39,076 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:52042
    [junit] 2010-10-26 14:54:39,078 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:44602, storageID=DS-610067579-127.0.1.1-44602-1288104868380, infoPort=42123, ipcPort=38023)
    [junit] 2010-10-26 14:54:39,078 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-26 14:54:39,079 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-3200216660841758636_1001 src: /127.0.0.1:47690 dest: /127.0.0.1:52042
    [junit] 2010-10-26 14:54:39,079 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-3200216660841758636_1001
    [junit] 2010-10-26 14:54:39,080 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:44602
    [junit] 2010-10-26 14:54:39,080 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-26 14:54:39,080 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-3200216660841758636_1001 src: /127.0.0.1:40770 dest: /127.0.0.1:44602
    [junit] 2010-10-26 14:54:39,081 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-3200216660841758636_1001
    [junit] 2010-10-26 14:54:39,081 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:52042
    [junit] 2010-10-26 14:54:39,082 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-3200216660841758636_1002 src: /127.0.0.1:40770 dest: /127.0.0.1:44602 of size 1
    [junit] 2010-10-26 14:54:39,082 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-3200216660841758636_1002 src: /127.0.0.1:47690 dest: /127.0.0.1:52042 of size 1
    [junit] 2010-10-26 14:54:39,083 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:44602 is added to blk_-3200216660841758636_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:52042|RBW], ReplicaUnderConstruction[127.0.0.1:44602|RBW], ReplicaUnderConstruction[127.0.0.1:50651|RBW]]} size 0
    [junit] 2010-10-26 14:54:39,084 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4553)) - updatePipeline(block=blk_-3200216660841758636_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:52042, 127.0.0.1:44602], clientName=DFSClient_-1910110805)
    [junit] 2010-10-26 14:54:39,086 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4595)) - updatePipeline(blk_-3200216660841758636_1001) successfully to blk_-3200216660841758636_1002
    [junit] 2010-10-26 14:54:39,086 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:52042 is added to blk_-3200216660841758636_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:52042|RBW], ReplicaUnderConstruction[127.0.0.1:44602|RBW]]} size 1
    [junit] 2010-10-26 14:54:39,089 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_-1910110805
    [junit] 2010-10-26 14:54:39,091 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-10-26 14:54:39,093 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:44602
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-10-26 14:54:39,094 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-10-26 14:54:39,094 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:44602, dest: /127.0.0.1:40771, bytes: 5, op: HDFS_READ, cliID: DFSClient_-1910110805, offset: 0, srvID: DS-610067579-127.0.1.1-44602-1288104868380, blockid: blk_-3200216660841758636_1002, duration: 264115
    [junit] 2010-10-26 14:54:39,095 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:44602
    [junit] 2010-10-26 14:54:39,196 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 53310
    [junit] 2010-10-26 14:54:39,197 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 53310: exiting
    [junit] 2010-10-26 14:54:39,197 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 53310
    [junit] 2010-10-26 14:54:39,197 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:52042, storageID=DS-1150598151-127.0.1.1-52042-1288104868905, infoPort=37934, ipcPort=53310):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-26 14:54:39,197 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-26 14:54:39,197 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-26 14:54:39,198 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-26 14:54:39,199 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:52042, storageID=DS-1150598151-127.0.1.1-52042-1288104868905, infoPort=37934, ipcPort=53310):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-10-26 14:54:39,199 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 53310
    [junit] 2010-10-26 14:54:39,199 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-26 14:54:39,199 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-26 14:54:39,199 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-26 14:54:39,200 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-26 14:54:39,200 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-10-26 14:54:39,202 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 56210
    [junit] 2010-10-26 14:54:39,202 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 56210: exiting
    [junit] 2010-10-26 14:54:39,203 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 56210
    [junit] 2010-10-26 14:54:39,203 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-26 14:54:39,203 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:50651, storageID=DS-1047562687-127.0.1.1-50651-1288104868651, infoPort=40209, ipcPort=56210)
    [junit] 2010-10-26 14:54:39,203 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:50651, storageID=DS-1047562687-127.0.1.1-50651-1288104868651, infoPort=40209, ipcPort=56210)
    [junit] 2010-10-26 14:54:39,203 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_-3200216660841758636_1001 0 : Thread is interrupted.
    [junit] 2010-10-26 14:54:39,203 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:50651, storageID=DS-1047562687-127.0.1.1-50651-1288104868651, infoPort=40209, ipcPort=56210):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-26 14:54:39,203 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-10-26 14:54:39,204 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_-3200216660841758636_1001 terminating
    [junit] 2010-10-26 14:54:39,204 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:50651, storageID=DS-1047562687-127.0.1.1-50651-1288104868651, infoPort=40209, ipcPort=56210):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-10-26 14:54:39,206 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-26 14:54:39,207 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-26 14:54:39,207 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:50651, storageID=DS-1047562687-127.0.1.1-50651-1288104868651, infoPort=40209, ipcPort=56210):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-10-26 14:54:39,207 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 56210
    [junit] 2010-10-26 14:54:39,207 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-26 14:54:39,208 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-26 14:54:39,208 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-26 14:54:39,208 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-26 14:54:39,208 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-10-26 14:54:39,310 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 38023
    [junit] 2010-10-26 14:54:39,310 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 38023: exiting
    [junit] 2010-10-26 14:54:39,311 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 38023
    [junit] 2010-10-26 14:54:39,311 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-26 14:54:39,311 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:44602, storageID=DS-610067579-127.0.1.1-44602-1288104868380, infoPort=42123, ipcPort=38023):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-26 14:54:39,311 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-26 14:54:39,404 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-26 14:54:39,412 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:44602, storageID=DS-610067579-127.0.1.1-44602-1288104868380, infoPort=42123, ipcPort=38023):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-10-26 14:54:39,412 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 38023
    [junit] 2010-10-26 14:54:39,412 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-26 14:54:39,413 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-26 14:54:39,413 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-26 14:54:39,413 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-26 14:54:39,515 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-26 14:54:39,515 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-26 14:54:39,516 INFO namenode.FSNamesystem (FSEditLog.java:printStatistics(1122)) - Number of transactions: 6 Total time for transactions(ms): 3Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 4
    [junit] 2010-10-26 14:54:39,517 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 38112
    [junit] 2010-10-26 14:54:39,517 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 38112: exiting
    [junit] 2010-10-26 14:54:39,517 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 38112: exiting
    [junit] 2010-10-26 14:54:39,518 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 38112: exiting
    [junit] 2010-10-26 14:54:39,517 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 38112: exiting
    [junit] 2010-10-26 14:54:39,518 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 38112: exiting
    [junit] 2010-10-26 14:54:39,518 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 38112: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 88.556 sec
    [junit] 2010-10-26 14:54:39,518 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 38112
    [junit] 2010-10-26 14:54:39,518 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 38112: exiting
    [junit] 2010-10-26 14:54:39,518 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 38112: exiting
    [junit] 2010-10-26 14:54:39,518 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-26 14:54:39,518 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 38112: exiting
    [junit] 2010-10-26 14:54:39,518 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 38112: exiting

    checkfailure:
    [touch] Creating <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/testsfailed>

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:706: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:473: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/src/test/aop/build/aop.xml>:230: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:664: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:621: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:689: Tests failed!

    Total time: 201 minutes 4 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Oct 27, 2010 at 4:41 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/469/>

    ------------------------------------------
    [...truncated 733476 lines...]
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-27 15:29:53,500 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-27 15:29:53,502 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-243544281360926144_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-27 15:29:53,503 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_-243544281360926144_1001 terminating
    [junit] 2010-10-27 15:29:53,503 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:49098, storageID=DS-1073259287-127.0.1.1-49098-1288193383435, infoPort=52401, ipcPort=35550)
    [junit] 2010-10-27 15:29:53,503 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:49098
    [junit] 2010-10-27 15:29:53,503 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-27 15:29:53,504 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-27 15:29:53,504 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:49098, storageID=DS-1073259287-127.0.1.1-49098-1288193383435, infoPort=52401, ipcPort=35550)
    [junit] 2010-10-27 15:29:53,505 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-243544281360926144_1001 src: /127.0.0.1:43967 dest: /127.0.0.1:49098
    [junit] 2010-10-27 15:29:53,504 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-27 15:29:53,505 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-243544281360926144_1001
    [junit] 2010-10-27 15:29:53,506 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-243544281360926144_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-27 15:29:53,506 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_-243544281360926144_1001 terminating
    [junit] 2010-10-27 15:29:53,506 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:40332, storageID=DS-1891322547-127.0.1.1-40332-1288193382910, infoPort=52737, ipcPort=46068)
    [junit] 2010-10-27 15:29:53,507 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:40332, storageID=DS-1891322547-127.0.1.1-40332-1288193382910, infoPort=52737, ipcPort=46068)
    [junit] 2010-10-27 15:29:53,507 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:40332
    [junit] 2010-10-27 15:29:53,509 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-27 15:29:53,509 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-243544281360926144_1001 src: /127.0.0.1:45284 dest: /127.0.0.1:40332
    [junit] 2010-10-27 15:29:53,509 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-243544281360926144_1001
    [junit] 2010-10-27 15:29:53,509 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:49098
    [junit] 2010-10-27 15:29:53,510 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-243544281360926144_1002 src: /127.0.0.1:45284 dest: /127.0.0.1:40332 of size 1
    [junit] 2010-10-27 15:29:53,511 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-243544281360926144_1002 src: /127.0.0.1:43967 dest: /127.0.0.1:49098 of size 1
    [junit] 2010-10-27 15:29:53,511 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:40332 is added to blk_-243544281360926144_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:49098|RBW], ReplicaUnderConstruction[127.0.0.1:40332|RBW], ReplicaUnderConstruction[127.0.0.1:59861|RBW]]} size 0
    [junit] 2010-10-27 15:29:53,512 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4553)) - updatePipeline(block=blk_-243544281360926144_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:49098, 127.0.0.1:40332], clientName=DFSClient_-1642891333)
    [junit] 2010-10-27 15:29:53,514 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4595)) - updatePipeline(blk_-243544281360926144_1001) successfully to blk_-243544281360926144_1002
    [junit] 2010-10-27 15:29:53,514 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:49098 is added to blk_-243544281360926144_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:49098|RBW], ReplicaUnderConstruction[127.0.0.1:40332|RBW]]} size 1
    [junit] 2010-10-27 15:29:53,517 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_-1642891333
    [junit] 2010-10-27 15:29:53,520 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-10-27 15:29:53,522 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:49098
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-10-27 15:29:53,523 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-10-27 15:29:53,523 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:49098, dest: /127.0.0.1:43969, bytes: 5, op: HDFS_READ, cliID: DFSClient_-1642891333, offset: 0, srvID: DS-1073259287-127.0.1.1-49098-1288193383435, blockid: blk_-243544281360926144_1002, duration: 276445
    [junit] 2010-10-27 15:29:53,524 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:49098
    [junit] 2010-10-27 15:29:53,625 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 35550
    [junit] 2010-10-27 15:29:53,625 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 35550: exiting
    [junit] 2010-10-27 15:29:53,625 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 35550
    [junit] 2010-10-27 15:29:53,626 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-27 15:29:53,625 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-27 15:29:53,626 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:49098, storageID=DS-1073259287-127.0.1.1-49098-1288193383435, infoPort=52401, ipcPort=35550):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-27 15:29:53,628 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-27 15:29:53,629 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-27 15:29:53,629 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:49098, storageID=DS-1073259287-127.0.1.1-49098-1288193383435, infoPort=52401, ipcPort=35550):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-10-27 15:29:53,629 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 35550
    [junit] 2010-10-27 15:29:53,629 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-27 15:29:53,630 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-27 15:29:53,630 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-27 15:29:53,630 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-27 15:29:53,631 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-10-27 15:29:53,732 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 60169
    [junit] 2010-10-27 15:29:53,733 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 60169: exiting
    [junit] 2010-10-27 15:29:53,733 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-27 15:29:53,733 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 60169
    [junit] 2010-10-27 15:29:53,734 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-10-27 15:29:53,734 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_-243544281360926144_1001 0 : Thread is interrupted.
    [junit] 2010-10-27 15:29:53,733 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:59861, storageID=DS-1693914179-127.0.1.1-59861-1288193383175, infoPort=35785, ipcPort=60169):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-27 15:29:53,734 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_-243544281360926144_1001 terminating
    [junit] 2010-10-27 15:29:53,734 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:59861, storageID=DS-1693914179-127.0.1.1-59861-1288193383175, infoPort=35785, ipcPort=60169)
    [junit] 2010-10-27 15:29:53,735 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:59861, storageID=DS-1693914179-127.0.1.1-59861-1288193383175, infoPort=35785, ipcPort=60169)
    [junit] 2010-10-27 15:29:53,735 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:59861, storageID=DS-1693914179-127.0.1.1-59861-1288193383175, infoPort=35785, ipcPort=60169):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-10-27 15:29:53,736 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-27 15:29:53,736 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-27 15:29:53,737 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:59861, storageID=DS-1693914179-127.0.1.1-59861-1288193383175, infoPort=35785, ipcPort=60169):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-10-27 15:29:53,737 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 60169
    [junit] 2010-10-27 15:29:53,737 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-27 15:29:53,738 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-27 15:29:53,738 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-27 15:29:53,738 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-27 15:29:53,738 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-10-27 15:29:53,840 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 46068
    [junit] 2010-10-27 15:29:53,840 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 46068: exiting
    [junit] 2010-10-27 15:29:53,840 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 46068
    [junit] 2010-10-27 15:29:53,841 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-27 15:29:53,841 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-27 15:29:53,841 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:40332, storageID=DS-1891322547-127.0.1.1-40332-1288193382910, infoPort=52737, ipcPort=46068):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-27 15:29:53,843 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-27 15:29:53,933 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-27 15:29:53,944 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:40332, storageID=DS-1891322547-127.0.1.1-40332-1288193382910, infoPort=52737, ipcPort=46068):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-10-27 15:29:53,944 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 46068
    [junit] 2010-10-27 15:29:53,944 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-27 15:29:53,944 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-27 15:29:53,944 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-27 15:29:53,945 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-27 15:29:54,047 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-27 15:29:54,047 INFO namenode.FSNamesystem (FSEditLog.java:printStatistics(1122)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 3
    [junit] 2010-10-27 15:29:54,047 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-27 15:29:54,048 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 54019
    [junit] 2010-10-27 15:29:54,048 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 54019: exiting
    [junit] 2010-10-27 15:29:54,049 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 54019
    [junit] 2010-10-27 15:29:54,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 54019: exiting
    [junit] 2010-10-27 15:29:54,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 54019: exiting
    [junit] 2010-10-27 15:29:54,048 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 54019: exiting
    [junit] 2010-10-27 15:29:54,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 54019: exiting
    [junit] 2010-10-27 15:29:54,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 54019: exiting
    [junit] 2010-10-27 15:29:54,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 54019: exiting
    [junit] 2010-10-27 15:29:54,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 54019: exiting
    [junit] 2010-10-27 15:29:54,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 54019: exiting
    [junit] 2010-10-27 15:29:54,049 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-27 15:29:54,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 54019: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 46.499 sec

    checkfailure:

    run-test-hdfs-all-withtestcaseonly:

    run-test-hdfs:

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

    Total time: 236 minutes 18 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Oct 28, 2010 at 4:02 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/470/>

    ------------------------------------------
    [...truncated 793167 lines...]
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-28 16:02:32,559 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Broken pipe
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-28 16:02:32,560 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_8894127925260049793_1001 2 Exception java.io.IOException: Broken pipe
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-28 16:02:32,560 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_8894127925260049793_1001 terminating
    [junit] 2010-10-28 16:02:32,562 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:44602
    [junit] 2010-10-28 16:02:32,562 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-28 16:02:32,563 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_8894127925260049793_1001 src: /127.0.0.1:44464 dest: /127.0.0.1:44602
    [junit] 2010-10-28 16:02:32,563 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_8894127925260049793_1001
    [junit] 2010-10-28 16:02:32,571 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:44602, storageID=DS-337578606-127.0.1.1-44602-1288281742230, infoPort=43130, ipcPort=33659)
    [junit] 2010-10-28 16:02:32,571 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-28 16:02:32,572 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-28 16:02:32,572 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_8894127925260049793_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-28 16:02:32,573 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_8894127925260049793_1001 terminating
    [junit] 2010-10-28 16:02:32,573 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:35177, storageID=DS-1532553751-127.0.1.1-35177-1288281741943, infoPort=36672, ipcPort=40556)
    [junit] 2010-10-28 16:02:32,573 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:35177, storageID=DS-1532553751-127.0.1.1-35177-1288281741943, infoPort=36672, ipcPort=40556)
    [junit] 2010-10-28 16:02:32,572 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:44602, storageID=DS-337578606-127.0.1.1-44602-1288281742230, infoPort=43130, ipcPort=33659)
    [junit] 2010-10-28 16:02:32,575 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:35177
    [junit] 2010-10-28 16:02:32,575 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-28 16:02:32,575 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_8894127925260049793_1001 src: /127.0.0.1:56740 dest: /127.0.0.1:35177
    [junit] 2010-10-28 16:02:32,575 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_8894127925260049793_1001
    [junit] 2010-10-28 16:02:32,576 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_8894127925260049793_1002 src: /127.0.0.1:56740 dest: /127.0.0.1:35177 of size 1
    [junit] 2010-10-28 16:02:32,577 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:35177 is added to blk_8894127925260049793_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:44602|RBW], ReplicaUnderConstruction[127.0.0.1:35177|RBW], ReplicaUnderConstruction[127.0.0.1:56144|RBW]]} size 0
    [junit] 2010-10-28 16:02:32,576 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:44602
    [junit] 2010-10-28 16:02:32,578 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_8894127925260049793_1002 src: /127.0.0.1:44464 dest: /127.0.0.1:44602 of size 1
    [junit] 2010-10-28 16:02:32,580 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:44602 is added to blk_8894127925260049793_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:44602|RBW], ReplicaUnderConstruction[127.0.0.1:35177|RBW], ReplicaUnderConstruction[127.0.0.1:56144|RBW]]} size 0
    [junit] 2010-10-28 16:02:32,581 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4553)) - updatePipeline(block=blk_8894127925260049793_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:44602, 127.0.0.1:35177], clientName=DFSClient_1464364156)
    [junit] 2010-10-28 16:02:32,582 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4595)) - updatePipeline(blk_8894127925260049793_1001) successfully to blk_8894127925260049793_1002
    [junit] 2010-10-28 16:02:32,585 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_1464364156
    [junit] 2010-10-28 16:02:32,588 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-10-28 16:02:32,589 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:44602
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-10-28 16:02:32,591 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:44602, dest: /127.0.0.1:44466, bytes: 5, op: HDFS_READ, cliID: DFSClient_1464364156, offset: 0, srvID: DS-337578606-127.0.1.1-44602-1288281742230, blockid: blk_8894127925260049793_1002, duration: 281718
    [junit] 2010-10-28 16:02:32,591 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-10-28 16:02:32,591 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:44602
    [junit] 2010-10-28 16:02:32,693 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 45437
    [junit] 2010-10-28 16:02:32,693 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 45437: exiting
    [junit] 2010-10-28 16:02:32,694 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 45437
    [junit] 2010-10-28 16:02:32,694 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-28 16:02:32,694 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-10-28 16:02:32,694 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:56144, storageID=DS-460881069-127.0.1.1-56144-1288281742494, infoPort=44189, ipcPort=45437):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-28 16:02:32,694 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:56144, storageID=DS-460881069-127.0.1.1-56144-1288281742494, infoPort=44189, ipcPort=45437)
    [junit] 2010-10-28 16:02:32,694 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_8894127925260049793_1001 0 : Thread is interrupted.
    [junit] 2010-10-28 16:02:32,695 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_8894127925260049793_1001 terminating
    [junit] 2010-10-28 16:02:32,695 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:56144, storageID=DS-460881069-127.0.1.1-56144-1288281742494, infoPort=44189, ipcPort=45437)
    [junit] 2010-10-28 16:02:32,696 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:56144, storageID=DS-460881069-127.0.1.1-56144-1288281742494, infoPort=44189, ipcPort=45437):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-10-28 16:02:32,696 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-28 16:02:32,697 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-28 16:02:32,698 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:56144, storageID=DS-460881069-127.0.1.1-56144-1288281742494, infoPort=44189, ipcPort=45437):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-10-28 16:02:32,698 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 45437
    [junit] 2010-10-28 16:02:32,698 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-28 16:02:32,698 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-28 16:02:32,698 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-28 16:02:32,699 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-28 16:02:32,699 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-10-28 16:02:32,801 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 33659
    [junit] 2010-10-28 16:02:32,801 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 33659: exiting
    [junit] 2010-10-28 16:02:32,802 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 33659
    [junit] 2010-10-28 16:02:32,802 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-28 16:02:32,802 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:44602, storageID=DS-337578606-127.0.1.1-44602-1288281742230, infoPort=43130, ipcPort=33659):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-28 16:02:32,802 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-28 16:02:32,803 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-28 16:02:32,803 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:44602, storageID=DS-337578606-127.0.1.1-44602-1288281742230, infoPort=43130, ipcPort=33659):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-10-28 16:02:32,803 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 33659
    [junit] 2010-10-28 16:02:32,803 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-28 16:02:32,804 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-28 16:02:32,804 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-28 16:02:32,804 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-28 16:02:32,805 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-10-28 16:02:32,924 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 40556
    [junit] 2010-10-28 16:02:32,925 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 40556: exiting
    [junit] 2010-10-28 16:02:32,925 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 40556
    [junit] 2010-10-28 16:02:32,926 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-28 16:02:32,926 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-28 16:02:32,926 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:35177, storageID=DS-1532553751-127.0.1.1-35177-1288281741943, infoPort=36672, ipcPort=40556):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-28 16:02:32,928 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-28 16:02:32,992 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-28 16:02:33,029 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:35177, storageID=DS-1532553751-127.0.1.1-35177-1288281741943, infoPort=36672, ipcPort=40556):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-10-28 16:02:33,029 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 40556
    [junit] 2010-10-28 16:02:33,029 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-28 16:02:33,029 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-28 16:02:33,030 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-28 16:02:33,030 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-28 16:02:33,132 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-28 16:02:33,132 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-28 16:02:33,132 INFO namenode.FSNamesystem (FSEditLog.java:printStatistics(1122)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3
    [junit] 2010-10-28 16:02:33,134 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 42440
    [junit] 2010-10-28 16:02:33,134 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 42440: exiting
    [junit] 2010-10-28 16:02:33,135 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 42440
    [junit] 2010-10-28 16:02:33,135 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 42440: exiting
    [junit] 2010-10-28 16:02:33,135 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 42440: exiting
    [junit] 2010-10-28 16:02:33,135 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 42440: exiting
    [junit] 2010-10-28 16:02:33,135 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 42440: exiting
    [junit] 2010-10-28 16:02:33,136 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 42440: exiting
    [junit] 2010-10-28 16:02:33,136 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 42440: exiting
    [junit] 2010-10-28 16:02:33,136 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 42440: exiting
    [junit] 2010-10-28 16:02:33,136 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 42440: exiting
    [junit] 2010-10-28 16:02:33,135 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 42440: exiting
    [junit] 2010-10-28 16:02:33,135 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 75.997 sec

    checkfailure:

    run-test-hdfs-all-withtestcaseonly:

    run-test-hdfs:

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

    Total time: 268 minutes 45 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Oct 29, 2010 at 3:04 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/471/>

    ------------------------------------------
    [...truncated 766785 lines...]
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-29 15:04:47,485 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-29 15:04:47,485 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-5088119518946758905_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-29 15:04:47,486 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_-5088119518946758905_1001 terminating
    [junit] 2010-10-29 15:04:47,486 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:37165, storageID=DS-1039865817-127.0.1.1-37165-1288364676881, infoPort=42366, ipcPort=40830)
    [junit] 2010-10-29 15:04:47,486 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:37165, storageID=DS-1039865817-127.0.1.1-37165-1288364676881, infoPort=42366, ipcPort=40830)
    [junit] 2010-10-29 15:04:47,486 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-29 15:04:47,487 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-29 15:04:47,489 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-5088119518946758905_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-29 15:04:47,489 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:37165
    [junit] 2010-10-29 15:04:47,489 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_-5088119518946758905_1001 terminating
    [junit] 2010-10-29 15:04:47,490 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-29 15:04:47,490 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:49553, storageID=DS-1847995940-127.0.1.1-49553-1288364677423, infoPort=43144, ipcPort=60016)
    [junit] 2010-10-29 15:04:47,490 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-5088119518946758905_1001 src: /127.0.0.1:48322 dest: /127.0.0.1:37165
    [junit] 2010-10-29 15:04:47,491 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-5088119518946758905_1001
    [junit] 2010-10-29 15:04:47,490 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:49553, storageID=DS-1847995940-127.0.1.1-49553-1288364677423, infoPort=43144, ipcPort=60016)
    [junit] 2010-10-29 15:04:47,500 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:49553
    [junit] 2010-10-29 15:04:47,501 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-29 15:04:47,501 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-5088119518946758905_1001 src: /127.0.0.1:48084 dest: /127.0.0.1:49553
    [junit] 2010-10-29 15:04:47,501 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-5088119518946758905_1001
    [junit] 2010-10-29 15:04:47,502 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:37165
    [junit] 2010-10-29 15:04:47,512 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-5088119518946758905_1002 src: /127.0.0.1:48084 dest: /127.0.0.1:49553 of size 1
    [junit] 2010-10-29 15:04:47,512 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-5088119518946758905_1002 src: /127.0.0.1:48322 dest: /127.0.0.1:37165 of size 1
    [junit] 2010-10-29 15:04:47,513 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:49553 is added to blk_-5088119518946758905_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:37165|RBW], ReplicaUnderConstruction[127.0.0.1:49553|RBW], ReplicaUnderConstruction[127.0.0.1:33902|RBW]]} size 0
    [junit] 2010-10-29 15:04:47,514 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4553)) - updatePipeline(block=blk_-5088119518946758905_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:37165, 127.0.0.1:49553], clientName=DFSClient_-897504069)
    [junit] 2010-10-29 15:04:47,516 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4595)) - updatePipeline(blk_-5088119518946758905_1001) successfully to blk_-5088119518946758905_1002
    [junit] 2010-10-29 15:04:47,516 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:37165 is added to blk_-5088119518946758905_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:37165|RBW], ReplicaUnderConstruction[127.0.0.1:49553|RBW]]} size 1
    [junit] 2010-10-29 15:04:47,519 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_-897504069
    [junit] 2010-10-29 15:04:47,522 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-10-29 15:04:47,523 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:49553
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-10-29 15:04:47,525 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:49553, dest: /127.0.0.1:48085, bytes: 5, op: HDFS_READ, cliID: DFSClient_-897504069, offset: 0, srvID: DS-1847995940-127.0.1.1-49553-1288364677423, blockid: blk_-5088119518946758905_1002, duration: 262087
    [junit] 2010-10-29 15:04:47,525 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-10-29 15:04:47,525 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:49553
    [junit] 2010-10-29 15:04:47,627 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 60016
    [junit] 2010-10-29 15:04:47,627 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 60016: exiting
    [junit] 2010-10-29 15:04:47,628 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 60016
    [junit] 2010-10-29 15:04:47,628 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-29 15:04:47,628 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-29 15:04:47,628 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:49553, storageID=DS-1847995940-127.0.1.1-49553-1288364677423, infoPort=43144, ipcPort=60016):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-29 15:04:47,630 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-29 15:04:47,631 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-29 15:04:47,631 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:49553, storageID=DS-1847995940-127.0.1.1-49553-1288364677423, infoPort=43144, ipcPort=60016):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized>'}
    [junit] 2010-10-29 15:04:47,631 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 60016
    [junit] 2010-10-29 15:04:47,632 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-29 15:04:47,632 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-29 15:04:47,632 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-29 15:04:47,633 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-29 15:04:47,633 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-10-29 15:04:47,735 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 56054
    [junit] 2010-10-29 15:04:47,735 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 56054: exiting
    [junit] 2010-10-29 15:04:47,735 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 56054
    [junit] 2010-10-29 15:04:47,736 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-29 15:04:47,736 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:33902, storageID=DS-201002006-127.0.1.1-33902-1288364677154, infoPort=47934, ipcPort=56054)
    [junit] 2010-10-29 15:04:47,736 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_-5088119518946758905_1001 0 : Thread is interrupted.
    [junit] 2010-10-29 15:04:47,736 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-10-29 15:04:47,736 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_-5088119518946758905_1001 terminating
    [junit] 2010-10-29 15:04:47,736 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:33902, storageID=DS-201002006-127.0.1.1-33902-1288364677154, infoPort=47934, ipcPort=56054)
    [junit] 2010-10-29 15:04:47,736 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:33902, storageID=DS-201002006-127.0.1.1-33902-1288364677154, infoPort=47934, ipcPort=56054):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-29 15:04:47,737 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:33902, storageID=DS-201002006-127.0.1.1-33902-1288364677154, infoPort=47934, ipcPort=56054):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-10-29 15:04:47,739 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-29 15:04:47,739 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-29 15:04:47,740 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:33902, storageID=DS-201002006-127.0.1.1-33902-1288364677154, infoPort=47934, ipcPort=56054):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized>'}
    [junit] 2010-10-29 15:04:47,740 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 56054
    [junit] 2010-10-29 15:04:47,740 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-29 15:04:47,740 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-29 15:04:47,740 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-29 15:04:47,741 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-29 15:04:47,741 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-10-29 15:04:47,843 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 40830
    [junit] 2010-10-29 15:04:47,843 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 40830: exiting
    [junit] 2010-10-29 15:04:47,843 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 40830
    [junit] 2010-10-29 15:04:47,844 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-29 15:04:47,844 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:37165, storageID=DS-1039865817-127.0.1.1-37165-1288364676881, infoPort=42366, ipcPort=40830):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-29 15:04:47,843 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-29 15:04:47,846 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-29 15:04:47,920 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-29 15:04:47,947 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:37165, storageID=DS-1039865817-127.0.1.1-37165-1288364676881, infoPort=42366, ipcPort=40830):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized>'}
    [junit] 2010-10-29 15:04:47,947 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 40830
    [junit] 2010-10-29 15:04:47,947 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-29 15:04:47,947 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-29 15:04:47,947 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-29 15:04:47,948 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-29 15:04:48,065 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-29 15:04:48,065 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-29 15:04:48,065 INFO namenode.FSNamesystem (FSEditLog.java:printStatistics(1122)) - Number of transactions: 6 Total time for transactions(ms): 1 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 3
    [junit] 2010-10-29 15:04:48,067 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 33864
    [junit] 2010-10-29 15:04:48,067 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 33864: exiting
    [junit] 2010-10-29 15:04:48,068 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 33864: exiting
    [junit] 2010-10-29 15:04:48,068 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 33864: exiting
    [junit] 2010-10-29 15:04:48,067 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 33864: exiting
    [junit] 2010-10-29 15:04:48,068 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 33864: exiting
    [junit] 2010-10-29 15:04:48,068 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-29 15:04:48,068 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 33864: exiting
    [junit] 2010-10-29 15:04:48,068 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 33864
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 92.7 sec
    [junit] 2010-10-29 15:04:48,068 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 33864: exiting
    [junit] 2010-10-29 15:04:48,067 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 33864: exiting
    [junit] 2010-10-29 15:04:48,067 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 33864: exiting
    [junit] 2010-10-29 15:04:48,067 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 33864: exiting

    checkfailure:

    run-test-hdfs-all-withtestcaseonly:

    run-test-hdfs:

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

    Total time: 211 minutes 3 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Oct 30, 2010 at 11:31 am
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/472/changes>

    Changes:

    [eli] Revert HDFS-259, need to update LAYOUT_VERSION.

    [shv] HDFS-1485. Fix typo in BlockPlacementPolicy. Contributed by Jingguo Yao.

    [eli] Add missing file in last commit (HDFS-1462).

    [eli] HDFS-1462. Refactor edit log loading to a separate class from edit log writing. Contributed by Todd Lipcon.

    [eli] HDFS-259. Remove intentionally corrupt 0.13 directory layout creation. Contributed by Todd Lipcon.

    ------------------------------------------
    [...truncated 814 lines...]
    A src/c++/libhdfs/m4/libtool.m4
    A src/c++/libhdfs/m4/ltversion.m4
    A src/c++/libhdfs/m4/apfunctions.m4
    A src/c++/libhdfs/m4/lt~obsolete.m4
    A src/c++/libhdfs/m4/ltoptions.m4
    A src/c++/libhdfs/m4/apsupport.m4
    A src/c++/libhdfs/configure
    AU src/c++/libhdfs/Makefile.in
    A src/c++/libhdfs/depcomp
    A src/c++/libhdfs/docs
    A src/c++/libhdfs/docs/Doxyfile
    A src/c++/libhdfs/docs/libhdfs_footer.html
    A src/c++/libhdfs/config.guess
    AU src/c++/libhdfs/ltmain.sh
    A src/c++/libhdfs/config.sub
    A src/c++/libhdfs/hdfs_read.c
    A src/c++/libhdfs/tests
    AU src/c++/libhdfs/tests/test-libhdfs.sh
    A src/c++/libhdfs/tests/conf
    A src/c++/libhdfs/configure.ac
    A src/c++/libhdfs/hdfs_test.c
    A src/c++/libhdfs/hdfs.c
    A src/c++/libhdfs/hdfsJniHelper.c
    AU src/c++/libhdfs/Makefile.am
    A src/c++/libhdfs/missing
    A src/c++/libhdfs/hdfs.h
    A src/c++/libhdfs/hdfsJniHelper.h
    A src/c++/libhdfs/aclocal.m4
    A src/c++/libhdfs/install-sh
    A src/docs
    A src/docs/forrest.properties
    A src/docs/status.xml
    A src/docs/src
    A src/docs/src/documentation
    A src/docs/src/documentation/conf
    A src/docs/src/documentation/conf/cli.xconf
    A src/docs/src/documentation/skinconf.xml
    A src/docs/src/documentation/content
    A src/docs/src/documentation/content/xdocs
    A src/docs/src/documentation/content/xdocs/SLG_user_guide.xml
    A src/docs/src/documentation/content/xdocs/hdfs_quota_admin_guide.xml
    A src/docs/src/documentation/content/xdocs/site.xml
    A src/docs/src/documentation/content/xdocs/faultinject_framework.xml
    A src/docs/src/documentation/content/xdocs/hdfsproxy.xml
    A src/docs/src/documentation/content/xdocs/index.xml
    A src/docs/src/documentation/content/xdocs/hdfs_imageviewer.xml
    A src/docs/src/documentation/content/xdocs/tabs.xml
    A src/docs/src/documentation/content/xdocs/libhdfs.xml
    A src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml
    A src/docs/src/documentation/content/xdocs/hdfs_design.xml
    A src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
    A src/docs/src/documentation/resources
    A src/docs/src/documentation/resources/images
    AU src/docs/src/documentation/resources/images/hdfsdatanodes.odg
    AU src/docs/src/documentation/resources/images/request-identify.jpg
    AU src/docs/src/documentation/resources/images/architecture.gif
    AU src/docs/src/documentation/resources/images/hadoop-logo-big.jpg
    AU src/docs/src/documentation/resources/images/hadoop-logo.jpg
    AU src/docs/src/documentation/resources/images/core-logo.gif
    AU src/docs/src/documentation/resources/images/hdfsdatanodes.png
    AU src/docs/src/documentation/resources/images/hdfsarchitecture.gif
    AU src/docs/src/documentation/resources/images/FI-framework.gif
    AU src/docs/src/documentation/resources/images/favicon.ico
    AU src/docs/src/documentation/resources/images/hdfsarchitecture.odg
    AU src/docs/src/documentation/resources/images/FI-framework.odg
    AU src/docs/src/documentation/resources/images/hdfs-logo.jpg
    AU src/docs/src/documentation/resources/images/hdfsproxy-forward.jpg
    AU src/docs/src/documentation/resources/images/hdfsproxy-server.jpg
    AU src/docs/src/documentation/resources/images/hdfsproxy-overview.jpg
    AU src/docs/src/documentation/resources/images/hdfsarchitecture.png
    AU src/docs/src/documentation/resources/images/hdfsdatanodes.gif
    A src/docs/src/documentation/README.txt
    A src/docs/src/documentation/classes
    A src/docs/src/documentation/classes/CatalogManager.properties
    A src/docs/changes
    A src/docs/changes/ChangesFancyStyle.css
    AU src/docs/changes/changes2html.pl
    A src/docs/changes/ChangesSimpleStyle.css
    A src/docs/releasenotes.html
    A bin
    A bin/hdfs-config.sh
    AU bin/start-dfs.sh
    AU bin/stop-balancer.sh
    AU bin/hdfs
    A bin/stop-secure-dns.sh
    AU bin/stop-dfs.sh
    AU bin/start-balancer.sh
    A bin/start-secure-dns.sh
    AU build.xml
    U .
    Fetching 'https://svn.apache.org/repos/asf/hadoop/common/trunk/src/test/bin' at -1 into '<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/src/test/bin>'
    AU src/test/bin/test-patch.sh
    At revision 1029030
    At revision 1029030
    Checking out http://svn.apache.org/repos/asf/hadoop/nightly
    A commitBuild.sh
    A hudsonEnv.sh
    AU hudsonBuildHadoopNightly.sh
    AU hudsonBuildHadoopPatch.sh
    AU hudsonBuildHadoopRelease.sh
    AU processHadoopPatchEmailRemote.sh
    AU hudsonPatchQueueAdmin.sh
    AU processHadoopPatchEmail.sh
    A README.txt
    A test-patch
    A test-patch/test-patch.sh
    At revision 1029030
    no change for http://svn.apache.org/repos/asf/hadoop/nightly since the previous build
    no change for https://svn.apache.org/repos/asf/hadoop/common/trunk/src/test/bin since the previous build
    [Hadoop-Hdfs-trunk] $ /bin/bash /tmp/hudson1057861429925166567.sh
    Buildfile: build.xml

    clean-contrib:

    clean:

    check-libhdfs-fuse:

    clean:
    Trying to override old definition of task macro_tar

    clean:
    [echo] contrib: hdfsproxy

    clean:
    [echo] contrib: thriftfs

    clean-fi:

    clean:

    BUILD SUCCESSFUL
    Total time: 0 seconds
    Buildfile: build.xml

    clean-contrib:

    clean:

    check-libhdfs-fuse:

    clean:
    Trying to override old definition of task macro_tar

    clean:
    [echo] contrib: hdfsproxy

    clean:
    [echo] contrib: thriftfs

    clean-fi:

    clean:

    clean-contrib:

    clean:

    check-libhdfs-fuse:

    clean:
    Trying to override old definition of task macro_tar

    clean:
    [echo] contrib: hdfsproxy

    clean:
    [echo] contrib: thriftfs

    clean-fi:

    clean:

    clean-cache:
    [delete] Deleting directory /homes/hudson/.ivy2/cache/org.apache.hadoop

    clover.setup:

    clover.info:

    clover:

    ivy-download:
    [get] Getting: http://repo2.maven.org/maven2/org/apache/ivy/ivy/2.1.0/ivy-2.1.0.jar
    [get] To: <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/ivy/ivy-2.1.0.jar>

    ivy-init-dirs:
    [mkdir] Created dir: <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build/ivy>
    [mkdir] Created dir: <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build/ivy/lib>
    [mkdir] Created dir: <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build/ivy/report>
    [mkdir] Created dir: <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build/ivy/maven>

    ivy-probe-antlib:

    ivy-init-antlib:

    ivy-init:
    [ivy:configure] :: Ivy 2.1.0 - 20090925235825 :: http://ant.apache.org/ivy/ ::
    [ivy:configure] :: loading settings :: file = <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/ivy/ivysettings.xml>

    ivy-resolve-common:
    [ivy:resolve] downloading http://repo1.maven.org/maven2/org/apache/hadoop/avro/1.3.2/avro-1.3.2.jar ...
    [ivy:resolve] .............................................................................................................................................................................................................. (331kB)
    [ivy:resolve] .. (0kB)
    [ivy:resolve] [SUCCESSFUL ] org.apache.hadoop#avro;1.3.2!avro.jar (1507ms)
    [ivy:resolve]
    [ivy:resolve] :: problems summary ::
    [ivy:resolve] :::: WARNINGS
    [ivy:resolve] module not found: org.apache.hadoop#hadoop-common;0.22.0-SNAPSHOT
    [ivy:resolve] ==== apache-snapshot: tried
    [ivy:resolve] https://repository.apache.org/content/repositories/snapshots/org/apache/hadoop/hadoop-common/0.22.0-SNAPSHOT/hadoop-common-0.22.0-SNAPSHOT.pom
    [ivy:resolve] -- artifact org.apache.hadoop#hadoop-common;0.22.0-SNAPSHOT!hadoop-common.jar:
    [ivy:resolve] https://repository.apache.org/content/repositories/snapshots/org/apache/hadoop/hadoop-common/0.22.0-SNAPSHOT/hadoop-common-0.22.0-SNAPSHOT.jar
    [ivy:resolve] ==== maven2: tried
    [ivy:resolve] http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-common/0.22.0-SNAPSHOT/hadoop-common-0.22.0-SNAPSHOT.pom
    [ivy:resolve] -- artifact org.apache.hadoop#hadoop-common;0.22.0-SNAPSHOT!hadoop-common.jar:
    [ivy:resolve] http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-common/0.22.0-SNAPSHOT/hadoop-common-0.22.0-SNAPSHOT.jar
    [ivy:resolve] ::::::::::::::::::::::::::::::::::::::::::::::
    [ivy:resolve] :: UNRESOLVED DEPENDENCIES ::
    [ivy:resolve] ::::::::::::::::::::::::::::::::::::::::::::::
    [ivy:resolve] :: org.apache.hadoop#hadoop-common;0.22.0-SNAPSHOT: not found
    [ivy:resolve] ::::::::::::::::::::::::::::::::::::::::::::::
    [ivy:resolve] :::: ERRORS
    [ivy:resolve] SERVER ERROR: Bad Gateway url=https://repository.apache.org/content/repositories/snapshots/org/apache/hadoop/hadoop-common/0.22.0-SNAPSHOT/maven-metadata.xml
    [ivy:resolve] SERVER ERROR: Bad Gateway url=https://repository.apache.org/content/repositories/snapshots/org/apache/hadoop/hadoop-common/0.22.0-SNAPSHOT/hadoop-common-0.22.0-SNAPSHOT.pom
    [ivy:resolve] SERVER ERROR: Bad Gateway url=https://repository.apache.org/content/repositories/snapshots/org/apache/hadoop/hadoop-common/0.22.0-SNAPSHOT/hadoop-common-0.22.0-SNAPSHOT.jar
    [ivy:resolve] SERVER ERROR: Bad Gateway url=https://repository.apache.org/content/repositories/snapshots/org/apache/hadoop/avro/1.3.2/avro-1.3.2.pom
    [ivy:resolve] SERVER ERROR: Bad Gateway url=https://repository.apache.org/content/repositories/snapshots/org/apache/hadoop/avro/1.3.2/avro-1.3.2.jar
    [ivy:resolve] SERVER ERROR: Bad Gateway url=https://repository.apache.org/content/repositories/snapshots/org/apache/hadoop/avro/1.3.2/avro-1.3.2-sources.jar
    [ivy:resolve] SERVER ERROR: Bad Gateway url=https://repository.apache.org/content/repositories/snapshots/org/apache/hadoop/avro/1.3.2/avro-1.3.2-javadoc.jar
    [ivy:resolve]
    [ivy:resolve] :: USE VERBOSE OR DEBUG MESSAGE LEVEL FOR MORE DETAILS

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:1551: impossible to resolve dependencies:
    resolve failed - see output for details

    Total time: 8 seconds
    mv: cannot stat `build/*.tar.tgz': No such file or directory
    mv: cannot stat `build/*.jar': No such file or directory
    mv: cannot stat `build/test/findbugs': No such file or directory
    mv: cannot stat `build/docs/api': No such file or directory
    Build Failed
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Oct 31, 2010 at 3:37 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/473/>

    ------------------------------------------
    [...truncated 764758 lines...]
    [junit] 2010-10-31 15:37:46,243 INFO hdfs.DFSClientAspects (DFSClientAspects.aj:ajc$before$org_apache_hadoop_hdfs_DFSClientAspects$4$3d82fab0(73)) - FI: before pipelineErrorAfterInit: errorIndex=2
    [junit] 2010-10-31 15:37:46,244 WARN hdfs.DFSClient (DFSOutputStream.java:setupPipelineForAppendOrRecovery(776)) - Error Recovery for block blk_3464702091918948980_1001 in pipeline 127.0.0.1:59442, 127.0.0.1:34195, 127.0.0.1:54575: bad datanode 127.0.0.1:54575
    [junit] 2010-10-31 15:37:46,244 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$8$9594fb70(205)) - FI: fiPipelineAck, datanode=DatanodeRegistration(127.0.0.1:59442, storageID=DS-759794399-127.0.1.1-59442-1288539456090, infoPort=55517, ipcPort=51019)
    [junit] 2010-10-31 15:37:46,244 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-31 15:37:46,245 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-31 15:37:46,245 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_3464702091918948980_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-31 15:37:46,246 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_3464702091918948980_1001 terminating
    [junit] 2010-10-31 15:37:46,248 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:59442, storageID=DS-759794399-127.0.1.1-59442-1288539456090, infoPort=55517, ipcPort=51019)
    [junit] 2010-10-31 15:37:46,259 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-31 15:37:46,259 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-10-31 15:37:46,260 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_3464702091918948980_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-31 15:37:46,260 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_3464702091918948980_1001 terminating
    [junit] 2010-10-31 15:37:46,259 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:59442, storageID=DS-759794399-127.0.1.1-59442-1288539456090, infoPort=55517, ipcPort=51019)
    [junit] 2010-10-31 15:37:46,261 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:59442
    [junit] 2010-10-31 15:37:46,261 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:34195, storageID=DS-1050017901-127.0.1.1-34195-1288539455555, infoPort=34848, ipcPort=55617)
    [junit] 2010-10-31 15:37:46,262 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-31 15:37:46,262 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_3464702091918948980_1001 src: /127.0.0.1:39400 dest: /127.0.0.1:59442
    [junit] 2010-10-31 15:37:46,262 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:34195, storageID=DS-1050017901-127.0.1.1-34195-1288539455555, infoPort=34848, ipcPort=55617)
    [junit] 2010-10-31 15:37:46,263 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_3464702091918948980_1001
    [junit] 2010-10-31 15:37:46,264 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:34195
    [junit] 2010-10-31 15:37:46,264 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-10-31 15:37:46,264 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_3464702091918948980_1001 src: /127.0.0.1:50265 dest: /127.0.0.1:34195
    [junit] 2010-10-31 15:37:46,265 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_3464702091918948980_1001
    [junit] 2010-10-31 15:37:46,265 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:59442
    [junit] 2010-10-31 15:37:46,265 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_3464702091918948980_1002 src: /127.0.0.1:50265 dest: /127.0.0.1:34195 of size 1
    [junit] 2010-10-31 15:37:46,267 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:34195 is added to blk_3464702091918948980_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:59442|RBW], ReplicaUnderConstruction[127.0.0.1:34195|RBW], ReplicaUnderConstruction[127.0.0.1:54575|RBW]]} size 0
    [junit] 2010-10-31 15:37:46,266 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_3464702091918948980_1002 src: /127.0.0.1:39400 dest: /127.0.0.1:59442 of size 1
    [junit] 2010-10-31 15:37:46,268 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4569)) - updatePipeline(block=blk_3464702091918948980_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:59442, 127.0.0.1:34195], clientName=DFSClient_2074721995)
    [junit] 2010-10-31 15:37:46,271 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4611)) - updatePipeline(blk_3464702091918948980_1001) successfully to blk_3464702091918948980_1002
    [junit] 2010-10-31 15:37:46,271 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:59442 is added to blk_3464702091918948980_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:59442|RBW], ReplicaUnderConstruction[127.0.0.1:34195|RBW]]} size 1
    [junit] 2010-10-31 15:37:46,274 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_2074721995
    [junit] 2010-10-31 15:37:46,276 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-10-31 15:37:46,278 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:59442
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-10-31 15:37:46,279 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:59442, dest: /127.0.0.1:39402, bytes: 5, op: HDFS_READ, cliID: DFSClient_2074721995, offset: 0, srvID: DS-759794399-127.0.1.1-59442-1288539456090, blockid: blk_3464702091918948980_1002, duration: 267817
    [junit] 2010-10-31 15:37:46,279 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-10-31 15:37:46,279 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:59442
    [junit] 2010-10-31 15:37:46,381 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 51019
    [junit] 2010-10-31 15:37:46,382 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 51019: exiting
    [junit] 2010-10-31 15:37:46,382 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-31 15:37:46,382 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:59442, storageID=DS-759794399-127.0.1.1-59442-1288539456090, infoPort=55517, ipcPort=51019):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-31 15:37:46,382 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 51019
    [junit] 2010-10-31 15:37:46,383 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-31 15:37:46,383 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-31 15:37:46,384 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:59442, storageID=DS-759794399-127.0.1.1-59442-1288539456090, infoPort=55517, ipcPort=51019):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-10-31 15:37:46,384 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 51019
    [junit] 2010-10-31 15:37:46,384 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-31 15:37:46,384 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-31 15:37:46,385 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-31 15:37:46,385 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-31 15:37:46,386 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-10-31 15:37:46,488 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 35505
    [junit] 2010-10-31 15:37:46,488 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 35505: exiting
    [junit] 2010-10-31 15:37:46,488 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 35505
    [junit] 2010-10-31 15:37:46,488 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-31 15:37:46,489 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_3464702091918948980_1001 0 : Thread is interrupted.
    [junit] 2010-10-31 15:37:46,489 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-10-31 15:37:46,489 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:54575, storageID=DS-729373414-127.0.1.1-54575-1288539455832, infoPort=34440, ipcPort=35505)
    [junit] 2010-10-31 15:37:46,489 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:54575, storageID=DS-729373414-127.0.1.1-54575-1288539455832, infoPort=34440, ipcPort=35505):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-31 15:37:46,489 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_3464702091918948980_1001 terminating
    [junit] 2010-10-31 15:37:46,490 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:54575, storageID=DS-729373414-127.0.1.1-54575-1288539455832, infoPort=34440, ipcPort=35505)
    [junit] 2010-10-31 15:37:46,491 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:54575, storageID=DS-729373414-127.0.1.1-54575-1288539455832, infoPort=34440, ipcPort=35505):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-10-31 15:37:46,491 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-31 15:37:46,492 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-31 15:37:46,492 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:54575, storageID=DS-729373414-127.0.1.1-54575-1288539455832, infoPort=34440, ipcPort=35505):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-10-31 15:37:46,492 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 35505
    [junit] 2010-10-31 15:37:46,493 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-31 15:37:46,493 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-31 15:37:46,493 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-31 15:37:46,493 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-31 15:37:46,494 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-10-31 15:37:46,595 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 55617
    [junit] 2010-10-31 15:37:46,596 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 55617: exiting
    [junit] 2010-10-31 15:37:46,596 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 55617
    [junit] 2010-10-31 15:37:46,596 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-10-31 15:37:46,596 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:34195, storageID=DS-1050017901-127.0.1.1-34195-1288539455555, infoPort=34848, ipcPort=55617):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-10-31 15:37:46,596 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-10-31 15:37:46,597 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-10-31 15:37:46,598 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:34195, storageID=DS-1050017901-127.0.1.1-34195-1288539455555, infoPort=34848, ipcPort=55617):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-10-31 15:37:46,598 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 55617
    [junit] 2010-10-31 15:37:46,598 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-10-31 15:37:46,598 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-10-31 15:37:46,598 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-10-31 15:37:46,599 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-10-31 15:37:46,713 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-31 15:37:46,713 INFO namenode.FSEditLog (FSEditLog.java:printStatistics(630)) - Number of transactions: 6 Total time for transactions(ms): 3Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 3
    [junit] 2010-10-31 15:37:46,713 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-10-31 15:37:46,715 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 47022
    [junit] 2010-10-31 15:37:46,715 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 47022: exiting
    [junit] 2010-10-31 15:37:46,715 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 47022: exiting
    [junit] 2010-10-31 15:37:46,716 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 47022: exiting
    [junit] 2010-10-31 15:37:46,716 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 47022: exiting
    [junit] 2010-10-31 15:37:46,716 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 47022: exiting
    [junit] 2010-10-31 15:37:46,716 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 47022: exiting
    [junit] 2010-10-31 15:37:46,716 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 47022: exiting
    [junit] 2010-10-31 15:37:46,716 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 47022: exiting
    [junit] 2010-10-31 15:37:46,717 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 47022: exiting
    [junit] 2010-10-31 15:37:46,716 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 47022: exiting
    [junit] 2010-10-31 15:37:46,717 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 47022
    [junit] 2010-10-31 15:37:46,717 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 97.586 sec

    checkfailure:

    run-test-hdfs-all-withtestcaseonly:

    run-test-hdfs:

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

    Total time: 243 minutes 59 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Nov 1, 2010 at 3:31 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/474/>

    ------------------------------------------
    [...truncated 769128 lines...]
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-01 15:32:17,476 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Broken pipe
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-01 15:32:17,477 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-1524285420535303467_1001 2 Exception java.io.IOException: Broken pipe
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-01 15:32:17,478 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_-1524285420535303467_1001 terminating
    [junit] 2010-11-01 15:32:17,478 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:38601, storageID=DS-1255306366-127.0.1.1-38601-1288625526672, infoPort=50238, ipcPort=45310)
    [junit] 2010-11-01 15:32:17,478 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:38601, storageID=DS-1255306366-127.0.1.1-38601-1288625526672, infoPort=50238, ipcPort=45310)
    [junit] 2010-11-01 15:32:17,478 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-01 15:32:17,479 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-01 15:32:17,480 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-1524285420535303467_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-01 15:32:17,480 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_-1524285420535303467_1001 terminating
    [junit] 2010-11-01 15:32:17,491 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:40559, storageID=DS-1648641766-127.0.1.1-40559-1288625527061, infoPort=60450, ipcPort=34264)
    [junit] 2010-11-01 15:32:17,491 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:38601
    [junit] 2010-11-01 15:32:17,491 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:40559, storageID=DS-1648641766-127.0.1.1-40559-1288625527061, infoPort=60450, ipcPort=34264)
    [junit] 2010-11-01 15:32:17,492 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-01 15:32:17,493 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-1524285420535303467_1001 src: /127.0.0.1:43975 dest: /127.0.0.1:38601
    [junit] 2010-11-01 15:32:17,493 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-1524285420535303467_1001
    [junit] 2010-11-01 15:32:17,494 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:40559
    [junit] 2010-11-01 15:32:17,494 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-01 15:32:17,494 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-1524285420535303467_1001 src: /127.0.0.1:46025 dest: /127.0.0.1:40559
    [junit] 2010-11-01 15:32:17,495 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-1524285420535303467_1001
    [junit] 2010-11-01 15:32:17,495 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:38601
    [junit] 2010-11-01 15:32:17,496 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-1524285420535303467_1002 src: /127.0.0.1:46025 dest: /127.0.0.1:40559 of size 1
    [junit] 2010-11-01 15:32:17,496 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-1524285420535303467_1002 src: /127.0.0.1:43975 dest: /127.0.0.1:38601 of size 1
    [junit] 2010-11-01 15:32:17,497 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:40559 is added to blk_-1524285420535303467_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:38601|RBW], ReplicaUnderConstruction[127.0.0.1:40559|RBW], ReplicaUnderConstruction[127.0.0.1:47820|RBW]]} size 0
    [junit] 2010-11-01 15:32:17,498 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4569)) - updatePipeline(block=blk_-1524285420535303467_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:38601, 127.0.0.1:40559], clientName=DFSClient_-112756678)
    [junit] 2010-11-01 15:32:17,500 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4611)) - updatePipeline(blk_-1524285420535303467_1001) successfully to blk_-1524285420535303467_1002
    [junit] 2010-11-01 15:32:17,500 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:38601 is added to blk_-1524285420535303467_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:38601|RBW], ReplicaUnderConstruction[127.0.0.1:40559|RBW]]} size 1
    [junit] 2010-11-01 15:32:17,503 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_-112756678
    [junit] 2010-11-01 15:32:17,506 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-11-01 15:32:17,507 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:40559
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-11-01 15:32:17,509 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:40559, dest: /127.0.0.1:46026, bytes: 5, op: HDFS_READ, cliID: DFSClient_-112756678, offset: 0, srvID: DS-1648641766-127.0.1.1-40559-1288625527061, blockid: blk_-1524285420535303467_1002, duration: 281239
    [junit] 2010-11-01 15:32:17,509 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-11-01 15:32:17,509 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:40559
    [junit] 2010-11-01 15:32:17,621 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 56661
    [junit] 2010-11-01 15:32:17,621 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 56661: exiting
    [junit] 2010-11-01 15:32:17,621 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 56661
    [junit] 2010-11-01 15:32:17,621 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-01 15:32:17,622 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:47820, storageID=DS-1738017040-127.0.1.1-47820-1288625527318, infoPort=55694, ipcPort=56661):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-01 15:32:17,622 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:47820, storageID=DS-1738017040-127.0.1.1-47820-1288625527318, infoPort=55694, ipcPort=56661)
    [junit] 2010-11-01 15:32:17,622 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_-1524285420535303467_1001 0 : Thread is interrupted.
    [junit] 2010-11-01 15:32:17,622 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-11-01 15:32:17,623 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_-1524285420535303467_1001 terminating
    [junit] 2010-11-01 15:32:17,622 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:47820, storageID=DS-1738017040-127.0.1.1-47820-1288625527318, infoPort=55694, ipcPort=56661)
    [junit] 2010-11-01 15:32:17,623 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:47820, storageID=DS-1738017040-127.0.1.1-47820-1288625527318, infoPort=55694, ipcPort=56661):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-11-01 15:32:17,625 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-01 15:32:17,625 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-01 15:32:17,626 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:47820, storageID=DS-1738017040-127.0.1.1-47820-1288625527318, infoPort=55694, ipcPort=56661):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-11-01 15:32:17,626 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 56661
    [junit] 2010-11-01 15:32:17,626 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-01 15:32:17,626 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-01 15:32:17,627 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-01 15:32:17,627 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-01 15:32:17,628 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-11-01 15:32:17,735 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 34264
    [junit] 2010-11-01 15:32:17,736 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 34264: exiting
    [junit] 2010-11-01 15:32:17,736 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 34264
    [junit] 2010-11-01 15:32:17,736 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-01 15:32:17,736 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:40559, storageID=DS-1648641766-127.0.1.1-40559-1288625527061, infoPort=60450, ipcPort=34264):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-01 15:32:17,736 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-11-01 15:32:17,737 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-01 15:32:17,737 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:40559, storageID=DS-1648641766-127.0.1.1-40559-1288625527061, infoPort=60450, ipcPort=34264):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-11-01 15:32:17,738 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 34264
    [junit] 2010-11-01 15:32:17,738 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-01 15:32:17,738 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-01 15:32:17,738 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-01 15:32:17,739 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-01 15:32:17,739 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-11-01 15:32:17,841 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 45310
    [junit] 2010-11-01 15:32:17,841 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 45310: exiting
    [junit] 2010-11-01 15:32:17,841 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-01 15:32:17,841 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:38601, storageID=DS-1255306366-127.0.1.1-38601-1288625526672, infoPort=50238, ipcPort=45310):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-01 15:32:17,842 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 45310
    [junit] 2010-11-01 15:32:17,841 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-11-01 15:32:17,943 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-01 15:32:17,943 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:38601, storageID=DS-1255306366-127.0.1.1-38601-1288625526672, infoPort=50238, ipcPort=45310):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-11-01 15:32:17,943 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 45310
    [junit] 2010-11-01 15:32:17,943 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-01 15:32:17,944 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-01 15:32:17,944 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-01 15:32:17,944 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-01 15:32:18,046 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-01 15:32:18,046 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-01 15:32:18,046 INFO namenode.FSEditLog (FSEditLog.java:printStatistics(630)) - Number of transactions: 6 Total time for transactions(ms): 3Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 3
    [junit] 2010-11-01 15:32:18,048 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 52053
    [junit] 2010-11-01 15:32:18,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 52053: exiting
    [junit] 2010-11-01 15:32:18,049 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-01 15:32:18,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 52053: exiting
    [junit] 2010-11-01 15:32:18,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 52053: exiting
    [junit] 2010-11-01 15:32:18,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 52053: exiting
    [junit] 2010-11-01 15:32:18,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 52053: exiting
    [junit] 2010-11-01 15:32:18,049 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 52053
    [junit] 2010-11-01 15:32:18,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 52053: exiting
    [junit] 2010-11-01 15:32:18,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 52053: exiting
    [junit] 2010-11-01 15:32:18,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 52053: exiting
    [junit] 2010-11-01 15:32:18,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 52053: exiting
    [junit] 2010-11-01 15:32:18,049 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 52053: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 73.446 sec

    checkfailure:

    run-test-hdfs-all-withtestcaseonly:

    run-test-hdfs:

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

    Total time: 238 minutes 27 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Nov 2, 2010 at 3:34 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/475/changes>

    Changes:

    [nigel] HADOOP-7008. Enable test-patch.sh to have a configured number of acceptable findbugs and javadoc warnings. Contributed by nigel and gkesavan.

    ------------------------------------------
    [...truncated 765912 lines...]
    [junit] 2010-11-02 15:35:45,473 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-02 15:35:45,475 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-02 15:35:45,480 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_9008651265902800917_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-02 15:35:45,481 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:54062
    [junit] 2010-11-02 15:35:45,481 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_9008651265902800917_1001 terminating
    [junit] 2010-11-02 15:35:45,481 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-02 15:35:45,481 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-02 15:35:45,481 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:54062, storageID=DS-1826774553-127.0.1.1-54062-1288712135419, infoPort=44718, ipcPort=52974)
    [junit] 2010-11-02 15:35:45,483 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:54062, storageID=DS-1826774553-127.0.1.1-54062-1288712135419, infoPort=44718, ipcPort=52974)
    [junit] 2010-11-02 15:35:45,483 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-02 15:35:45,482 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_9008651265902800917_1001 src: /127.0.0.1:42633 dest: /127.0.0.1:54062
    [junit] 2010-11-02 15:35:45,484 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_9008651265902800917_1001
    [junit] 2010-11-02 15:35:45,484 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_9008651265902800917_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-02 15:35:45,485 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_9008651265902800917_1001 terminating
    [junit] 2010-11-02 15:35:45,485 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:57426, storageID=DS-1786929572-127.0.1.1-57426-1288712135161, infoPort=50113, ipcPort=58796)
    [junit] 2010-11-02 15:35:45,486 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:57426, storageID=DS-1786929572-127.0.1.1-57426-1288712135161, infoPort=50113, ipcPort=58796)
    [junit] 2010-11-02 15:35:45,486 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:57426
    [junit] 2010-11-02 15:35:45,486 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-02 15:35:45,487 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_9008651265902800917_1001 src: /127.0.0.1:51851 dest: /127.0.0.1:57426
    [junit] 2010-11-02 15:35:45,487 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_9008651265902800917_1001
    [junit] 2010-11-02 15:35:45,487 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:54062
    [junit] 2010-11-02 15:35:45,488 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_9008651265902800917_1002 src: /127.0.0.1:51851 dest: /127.0.0.1:57426 of size 1
    [junit] 2010-11-02 15:35:45,488 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_9008651265902800917_1002 src: /127.0.0.1:42633 dest: /127.0.0.1:54062 of size 1
    [junit] 2010-11-02 15:35:45,490 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:57426 is added to blk_9008651265902800917_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:54062|RBW], ReplicaUnderConstruction[127.0.0.1:57426|RBW], ReplicaUnderConstruction[127.0.0.1:45202|RBW]]} size 0
    [junit] 2010-11-02 15:35:45,490 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4569)) - updatePipeline(block=blk_9008651265902800917_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:54062, 127.0.0.1:57426], clientName=DFSClient_1126677593)
    [junit] 2010-11-02 15:35:45,493 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4611)) - updatePipeline(blk_9008651265902800917_1001) successfully to blk_9008651265902800917_1002
    [junit] 2010-11-02 15:35:45,493 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:54062 is added to blk_9008651265902800917_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:54062|RBW], ReplicaUnderConstruction[127.0.0.1:57426|RBW]]} size 1
    [junit] 2010-11-02 15:35:45,496 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_1126677593
    [junit] 2010-11-02 15:35:45,498 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-11-02 15:35:45,500 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:54062
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-11-02 15:35:45,501 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:54062, dest: /127.0.0.1:42635, bytes: 5, op: HDFS_READ, cliID: DFSClient_1126677593, offset: 0, srvID: DS-1826774553-127.0.1.1-54062-1288712135419, blockid: blk_9008651265902800917_1002, duration: 266413
    [junit] 2010-11-02 15:35:45,502 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-11-02 15:35:45,502 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:54062
    [junit] 2010-11-02 15:35:45,604 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 52974
    [junit] 2010-11-02 15:35:45,604 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 52974: exiting
    [junit] 2010-11-02 15:35:45,604 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 52974
    [junit] 2010-11-02 15:35:45,604 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-02 15:35:45,605 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:54062, storageID=DS-1826774553-127.0.1.1-54062-1288712135419, infoPort=44718, ipcPort=52974):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-02 15:35:45,604 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-11-02 15:35:45,606 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-02 15:35:45,606 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:54062, storageID=DS-1826774553-127.0.1.1-54062-1288712135419, infoPort=44718, ipcPort=52974):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-11-02 15:35:45,606 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 52974
    [junit] 2010-11-02 15:35:45,606 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-02 15:35:45,607 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-02 15:35:45,607 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-02 15:35:45,607 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-02 15:35:45,608 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-11-02 15:35:45,720 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 58796
    [junit] 2010-11-02 15:35:45,721 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 58796: exiting
    [junit] 2010-11-02 15:35:45,721 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 58796
    [junit] 2010-11-02 15:35:45,721 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:57426, storageID=DS-1786929572-127.0.1.1-57426-1288712135161, infoPort=50113, ipcPort=58796):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-02 15:35:45,721 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-11-02 15:35:45,721 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-02 15:35:45,722 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-02 15:35:45,723 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:57426, storageID=DS-1786929572-127.0.1.1-57426-1288712135161, infoPort=50113, ipcPort=58796):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-11-02 15:35:45,723 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 58796
    [junit] 2010-11-02 15:35:45,723 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-02 15:35:45,724 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-02 15:35:45,724 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-02 15:35:45,724 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-02 15:35:45,724 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-11-02 15:35:45,826 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 50858
    [junit] 2010-11-02 15:35:45,826 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 50858: exiting
    [junit] 2010-11-02 15:35:45,826 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 50858
    [junit] 2010-11-02 15:35:45,827 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-11-02 15:35:45,826 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-02 15:35:45,827 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:45202, storageID=DS-514509845-127.0.1.1-45202-1288712134895, infoPort=54426, ipcPort=50858)
    [junit] 2010-11-02 15:35:45,827 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:45202, storageID=DS-514509845-127.0.1.1-45202-1288712134895, infoPort=54426, ipcPort=50858)
    [junit] 2010-11-02 15:35:45,827 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_9008651265902800917_1001 0 : Thread is interrupted.
    [junit] 2010-11-02 15:35:45,827 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:45202, storageID=DS-514509845-127.0.1.1-45202-1288712134895, infoPort=54426, ipcPort=50858):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-02 15:35:45,828 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_9008651265902800917_1001 terminating
    [junit] 2010-11-02 15:35:45,828 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:45202, storageID=DS-514509845-127.0.1.1-45202-1288712134895, infoPort=54426, ipcPort=50858):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-11-02 15:35:45,829 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-02 15:35:45,930 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-02 15:35:45,930 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:45202, storageID=DS-514509845-127.0.1.1-45202-1288712134895, infoPort=54426, ipcPort=50858):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-11-02 15:35:45,930 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 50858
    [junit] 2010-11-02 15:35:45,930 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-02 15:35:45,930 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-02 15:35:45,931 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-02 15:35:45,931 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-02 15:35:46,033 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-02 15:35:46,033 INFO namenode.FSEditLog (FSEditLog.java:printStatistics(630)) - Number of transactions: 6 Total time for transactions(ms): 3Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 3
    [junit] 2010-11-02 15:35:46,033 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-02 15:35:46,035 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 50184
    [junit] 2010-11-02 15:35:46,035 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 50184: exiting
    [junit] 2010-11-02 15:35:46,035 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-02 15:35:46,036 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 50184: exiting
    [junit] 2010-11-02 15:35:46,035 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 50184: exiting
    [junit] 2010-11-02 15:35:46,035 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 50184
    [junit] 2010-11-02 15:35:46,037 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 50184: exiting
    [junit] 2010-11-02 15:35:46,035 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 50184: exiting
    [junit] 2010-11-02 15:35:46,035 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 50184: exiting
    [junit] 2010-11-02 15:35:46,035 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 50184: exiting
    [junit] 2010-11-02 15:35:46,038 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 50184: exiting
    [junit] 2010-11-02 15:35:46,038 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 50184: exiting
    [junit] 2010-11-02 15:35:46,037 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 50184: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 72.985 sec

    checkfailure:

    run-test-hdfs-all-withtestcaseonly:

    run-test-hdfs:

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

    Total time: 240 minutes 59 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Nov 3, 2010 at 2:45 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/476/>

    ------------------------------------------
    [...truncated 613498 lines...]
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-03 14:46:47,449 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-03 14:46:47,450 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-6689290206940956324_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-03 14:46:47,450 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_-6689290206940956324_1001 terminating
    [junit] 2010-11-03 14:46:47,451 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:55443, storageID=DS-1295453364-127.0.1.1-55443-1288795596836, infoPort=58617, ipcPort=48866)
    [junit] 2010-11-03 14:46:47,451 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-03 14:46:47,452 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-03 14:46:47,453 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-6689290206940956324_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-03 14:46:47,452 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:55443, storageID=DS-1295453364-127.0.1.1-55443-1288795596836, infoPort=58617, ipcPort=48866)
    [junit] 2010-11-03 14:46:47,453 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_-6689290206940956324_1001 terminating
    [junit] 2010-11-03 14:46:47,454 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:47369, storageID=DS-1835264033-127.0.1.1-47369-1288795597382, infoPort=52252, ipcPort=34210)
    [junit] 2010-11-03 14:46:47,455 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:55443
    [junit] 2010-11-03 14:46:47,455 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:47369, storageID=DS-1835264033-127.0.1.1-47369-1288795597382, infoPort=52252, ipcPort=34210)
    [junit] 2010-11-03 14:46:47,455 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-03 14:46:47,456 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-6689290206940956324_1001 src: /127.0.0.1:56380 dest: /127.0.0.1:55443
    [junit] 2010-11-03 14:46:47,456 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-6689290206940956324_1001
    [junit] 2010-11-03 14:46:47,457 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:47369
    [junit] 2010-11-03 14:46:47,458 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-03 14:46:47,458 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-6689290206940956324_1001 src: /127.0.0.1:38232 dest: /127.0.0.1:47369
    [junit] 2010-11-03 14:46:47,458 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-6689290206940956324_1001
    [junit] 2010-11-03 14:46:47,459 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:55443
    [junit] 2010-11-03 14:46:47,459 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-6689290206940956324_1002 src: /127.0.0.1:38232 dest: /127.0.0.1:47369 of size 1
    [junit] 2010-11-03 14:46:47,459 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-6689290206940956324_1002 src: /127.0.0.1:56380 dest: /127.0.0.1:55443 of size 1
    [junit] 2010-11-03 14:46:47,461 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:47369 is added to blk_-6689290206940956324_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:55443|RBW], ReplicaUnderConstruction[127.0.0.1:47369|RBW], ReplicaUnderConstruction[127.0.0.1:46024|RBW]]} size 0
    [junit] 2010-11-03 14:46:47,461 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4569)) - updatePipeline(block=blk_-6689290206940956324_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:55443, 127.0.0.1:47369], clientName=DFSClient_439051706)
    [junit] 2010-11-03 14:46:47,463 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4611)) - updatePipeline(blk_-6689290206940956324_1001) successfully to blk_-6689290206940956324_1002
    [junit] 2010-11-03 14:46:47,463 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:55443 is added to blk_-6689290206940956324_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:55443|RBW], ReplicaUnderConstruction[127.0.0.1:47369|RBW]]} size 1
    [junit] 2010-11-03 14:46:47,477 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_439051706
    [junit] 2010-11-03 14:46:47,479 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-11-03 14:46:47,481 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:55443
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-11-03 14:46:47,482 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:55443, dest: /127.0.0.1:56382, bytes: 5, op: HDFS_READ, cliID: DFSClient_439051706, offset: 0, srvID: DS-1295453364-127.0.1.1-55443-1288795596836, blockid: blk_-6689290206940956324_1002, duration: 271783
    [junit] 2010-11-03 14:46:47,482 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-11-03 14:46:47,483 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:55443
    [junit] 2010-11-03 14:46:47,585 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 34210
    [junit] 2010-11-03 14:46:47,585 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 34210: exiting
    [junit] 2010-11-03 14:46:47,585 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-11-03 14:46:47,585 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:47369, storageID=DS-1835264033-127.0.1.1-47369-1288795597382, infoPort=52252, ipcPort=34210):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-03 14:46:47,585 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-03 14:46:47,585 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 34210
    [junit] 2010-11-03 14:46:47,588 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-03 14:46:47,588 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-03 14:46:47,589 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:47369, storageID=DS-1835264033-127.0.1.1-47369-1288795597382, infoPort=52252, ipcPort=34210):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-11-03 14:46:47,589 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 34210
    [junit] 2010-11-03 14:46:47,589 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-03 14:46:47,589 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-03 14:46:47,590 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-03 14:46:47,590 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-03 14:46:47,591 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-11-03 14:46:47,692 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 38781
    [junit] 2010-11-03 14:46:47,693 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 38781: exiting
    [junit] 2010-11-03 14:46:47,693 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-03 14:46:47,693 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 38781
    [junit] 2010-11-03 14:46:47,693 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:46024, storageID=DS-1296196881-127.0.1.1-46024-1288795597113, infoPort=35033, ipcPort=38781)
    [junit] 2010-11-03 14:46:47,693 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:46024, storageID=DS-1296196881-127.0.1.1-46024-1288795597113, infoPort=35033, ipcPort=38781):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-03 14:46:47,693 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_-6689290206940956324_1001 0 : Thread is interrupted.
    [junit] 2010-11-03 14:46:47,693 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-11-03 14:46:47,694 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_-6689290206940956324_1001 terminating
    [junit] 2010-11-03 14:46:47,694 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:46024, storageID=DS-1296196881-127.0.1.1-46024-1288795597113, infoPort=35033, ipcPort=38781)
    [junit] 2010-11-03 14:46:47,695 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:46024, storageID=DS-1296196881-127.0.1.1-46024-1288795597113, infoPort=35033, ipcPort=38781):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-11-03 14:46:47,697 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-03 14:46:47,697 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-03 14:46:47,697 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:46024, storageID=DS-1296196881-127.0.1.1-46024-1288795597113, infoPort=35033, ipcPort=38781):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-11-03 14:46:47,698 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 38781
    [junit] 2010-11-03 14:46:47,698 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-03 14:46:47,698 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-03 14:46:47,698 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-03 14:46:47,699 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-03 14:46:47,699 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-11-03 14:46:47,801 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 48866
    [junit] 2010-11-03 14:46:47,801 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 48866: exiting
    [junit] 2010-11-03 14:46:47,801 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 48866
    [junit] 2010-11-03 14:46:47,802 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-11-03 14:46:47,802 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:55443, storageID=DS-1295453364-127.0.1.1-55443-1288795596836, infoPort=58617, ipcPort=48866):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-03 14:46:47,801 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-03 14:46:47,869 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-03 14:46:47,903 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:55443, storageID=DS-1295453364-127.0.1.1-55443-1288795596836, infoPort=58617, ipcPort=48866):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-11-03 14:46:47,903 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 48866
    [junit] 2010-11-03 14:46:47,903 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-03 14:46:47,904 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-03 14:46:47,904 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-03 14:46:47,904 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-03 14:46:48,006 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-03 14:46:48,006 INFO namenode.FSEditLog (FSEditLog.java:printStatistics(630)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3
    [junit] 2010-11-03 14:46:48,006 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-03 14:46:48,008 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 37554
    [junit] 2010-11-03 14:46:48,008 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 37554: exiting
    [junit] 2010-11-03 14:46:48,008 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 37554: exiting
    [junit] 2010-11-03 14:46:48,008 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 37554: exiting
    [junit] 2010-11-03 14:46:48,008 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 37554: exiting
    [junit] 2010-11-03 14:46:48,009 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 37554: exiting
    [junit] 2010-11-03 14:46:48,010 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 37554: exiting
    [junit] 2010-11-03 14:46:48,009 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 37554
    [junit] 2010-11-03 14:46:48,009 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-03 14:46:48,009 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 37554: exiting
    [junit] 2010-11-03 14:46:48,009 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 37554: exiting
    [junit] 2010-11-03 14:46:48,009 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 37554: exiting
    [junit] 2010-11-03 14:46:48,009 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 37554: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 75.521 sec

    checkfailure:
    [touch] Creating <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/testsfailed>

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:706: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:473: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/src/test/aop/build/aop.xml>:230: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:658: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:621: The following error occurred while executing this line:
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:689: Tests failed!

    Total time: 190 minutes 36 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Nov 4, 2010 at 3:32 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/477/>

    ------------------------------------------
    [...truncated 764810 lines...]
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-04 15:33:41,379 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-04 15:33:41,381 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_4909546972313332591_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-04 15:33:41,381 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_4909546972313332591_1001 terminating
    [junit] 2010-11-04 15:33:41,382 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:45801, storageID=DS-172278402-127.0.1.1-45801-1288884810655, infoPort=50720, ipcPort=46659)
    [junit] 2010-11-04 15:33:41,382 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-04 15:33:41,382 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:45801, storageID=DS-172278402-127.0.1.1-45801-1288884810655, infoPort=50720, ipcPort=46659)
    [junit] 2010-11-04 15:33:41,382 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-04 15:33:41,384 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:45801
    [junit] 2010-11-04 15:33:41,384 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_4909546972313332591_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:543)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-04 15:33:41,384 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-04 15:33:41,385 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_4909546972313332591_1001 terminating
    [junit] 2010-11-04 15:33:41,385 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_4909546972313332591_1001 src: /127.0.0.1:40481 dest: /127.0.0.1:45801
    [junit] 2010-11-04 15:33:41,385 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_4909546972313332591_1001
    [junit] 2010-11-04 15:33:41,385 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:53030, storageID=DS-1063102100-127.0.1.1-53030-1288884811300, infoPort=56521, ipcPort=58459)
    [junit] 2010-11-04 15:33:41,386 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:53030, storageID=DS-1063102100-127.0.1.1-53030-1288884811300, infoPort=56521, ipcPort=58459)
    [junit] 2010-11-04 15:33:41,387 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:53030
    [junit] 2010-11-04 15:33:41,387 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-04 15:33:41,387 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_4909546972313332591_1001 src: /127.0.0.1:38594 dest: /127.0.0.1:53030
    [junit] 2010-11-04 15:33:41,387 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_4909546972313332591_1001
    [junit] 2010-11-04 15:33:41,388 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:45801
    [junit] 2010-11-04 15:33:41,389 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:53030 is added to blk_4909546972313332591_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:45801|RBW], ReplicaUnderConstruction[127.0.0.1:53030|RBW], ReplicaUnderConstruction[127.0.0.1:56014|RBW]]} size 0
    [junit] 2010-11-04 15:33:41,389 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4569)) - updatePipeline(block=blk_4909546972313332591_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:45801, 127.0.0.1:53030], clientName=DFSClient_1235756338)
    [junit] 2010-11-04 15:33:41,388 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_4909546972313332591_1002 src: /127.0.0.1:38594 dest: /127.0.0.1:53030 of size 1
    [junit] 2010-11-04 15:33:41,390 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_4909546972313332591_1002 src: /127.0.0.1:40481 dest: /127.0.0.1:45801 of size 1
    [junit] 2010-11-04 15:33:41,393 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4611)) - updatePipeline(blk_4909546972313332591_1001) successfully to blk_4909546972313332591_1002
    [junit] 2010-11-04 15:33:41,393 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:45801 is added to blk_4909546972313332591_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:45801|RBW], ReplicaUnderConstruction[127.0.0.1:53030|RBW]]} size 1
    [junit] 2010-11-04 15:33:41,396 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_1235756338
    [junit] 2010-11-04 15:33:41,399 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-11-04 15:33:41,401 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:53030
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-11-04 15:33:41,402 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:53030, dest: /127.0.0.1:38595, bytes: 5, op: HDFS_READ, cliID: DFSClient_1235756338, offset: 0, srvID: DS-1063102100-127.0.1.1-53030-1288884811300, blockid: blk_4909546972313332591_1002, duration: 261785
    [junit] 2010-11-04 15:33:41,402 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-11-04 15:33:41,402 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:53030
    [junit] 2010-11-04 15:33:41,504 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 58459
    [junit] 2010-11-04 15:33:41,505 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 58459: exiting
    [junit] 2010-11-04 15:33:41,505 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-04 15:33:41,505 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:53030, storageID=DS-1063102100-127.0.1.1-53030-1288884811300, infoPort=56521, ipcPort=58459):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-04 15:33:41,505 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 58459
    [junit] 2010-11-04 15:33:41,506 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-04 15:33:41,507 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-04 15:33:41,507 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:53030, storageID=DS-1063102100-127.0.1.1-53030-1288884811300, infoPort=56521, ipcPort=58459):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-11-04 15:33:41,507 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 58459
    [junit] 2010-11-04 15:33:41,507 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-04 15:33:41,508 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-04 15:33:41,508 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-04 15:33:41,509 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-04 15:33:41,509 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-11-04 15:33:41,611 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 58725
    [junit] 2010-11-04 15:33:41,611 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 58725: exiting
    [junit] 2010-11-04 15:33:41,611 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_4909546972313332591_1001 0 : Thread is interrupted.
    [junit] 2010-11-04 15:33:41,612 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_4909546972313332591_1001 terminating
    [junit] 2010-11-04 15:33:41,612 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:56014, storageID=DS-1621623980-127.0.1.1-56014-1288884811038, infoPort=46852, ipcPort=58725)
    [junit] 2010-11-04 15:33:41,612 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:56014, storageID=DS-1621623980-127.0.1.1-56014-1288884811038, infoPort=46852, ipcPort=58725)
    [junit] 2010-11-04 15:33:41,613 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:56014, storageID=DS-1621623980-127.0.1.1-56014-1288884811038, infoPort=46852, ipcPort=58725):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-11-04 15:33:41,611 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-11-04 15:33:41,611 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-04 15:33:41,611 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 58725
    [junit] 2010-11-04 15:33:41,612 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:56014, storageID=DS-1621623980-127.0.1.1-56014-1288884811038, infoPort=46852, ipcPort=58725):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-04 15:33:41,616 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-04 15:33:41,617 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-04 15:33:41,617 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:56014, storageID=DS-1621623980-127.0.1.1-56014-1288884811038, infoPort=46852, ipcPort=58725):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-11-04 15:33:41,617 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 58725
    [junit] 2010-11-04 15:33:41,617 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-04 15:33:41,618 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-04 15:33:41,618 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-04 15:33:41,618 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-04 15:33:41,618 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-11-04 15:33:41,728 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 46659
    [junit] 2010-11-04 15:33:41,729 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 46659: exiting
    [junit] 2010-11-04 15:33:41,729 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 46659
    [junit] 2010-11-04 15:33:41,729 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:45801, storageID=DS-172278402-127.0.1.1-45801-1288884810655, infoPort=50720, ipcPort=46659):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-04 15:33:41,729 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-11-04 15:33:41,729 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-04 15:33:41,810 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-04 15:33:41,835 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:45801, storageID=DS-172278402-127.0.1.1-45801-1288884810655, infoPort=50720, ipcPort=46659):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-11-04 15:33:41,835 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 46659
    [junit] 2010-11-04 15:33:41,835 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-04 15:33:41,836 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-04 15:33:41,836 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-04 15:33:41,837 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-04 15:33:41,939 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-04 15:33:41,939 INFO namenode.FSEditLog (FSEditLog.java:printStatistics(630)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 3
    [junit] 2010-11-04 15:33:41,939 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-04 15:33:41,941 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 57984
    [junit] 2010-11-04 15:33:41,941 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 57984: exiting
    [junit] 2010-11-04 15:33:41,941 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-04 15:33:41,941 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 57984: exiting
    [junit] 2010-11-04 15:33:41,941 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 57984: exiting
    [junit] 2010-11-04 15:33:41,941 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 57984: exiting
    [junit] 2010-11-04 15:33:41,941 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 57984: exiting
    [junit] 2010-11-04 15:33:41,941 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 57984: exiting
    [junit] 2010-11-04 15:33:41,943 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 57984: exiting
    [junit] 2010-11-04 15:33:41,943 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 57984: exiting
    [junit] 2010-11-04 15:33:41,943 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 57984: exiting
    [junit] 2010-11-04 15:33:41,943 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 57984: exiting
    [junit] 2010-11-04 15:33:41,943 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 57984
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 71.723 sec

    checkfailure:

    run-test-hdfs-all-withtestcaseonly:

    run-test-hdfs:

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

    Total time: 235 minutes 46 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Nov 5, 2010 at 3:24 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/478/changes>

    Changes:

    [nigel] Update hudson patch admin script to fix missing option

    [nigel] HADOOP-7008. Fix bug and missing license header in test-patch files. (nigel)

    ------------------------------------------
    [...truncated 747926 lines...]
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-05 15:25:10,580 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-05 15:25:10,582 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_4070416229188018339_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-05 15:25:10,582 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_4070416229188018339_1001 terminating
    [junit] 2010-11-05 15:25:10,582 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:41837, storageID=DS-1134118578-127.0.1.1-41837-1288970700511, infoPort=43544, ipcPort=38247)
    [junit] 2010-11-05 15:25:10,582 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-05 15:25:10,583 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-05 15:25:10,583 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:41837, storageID=DS-1134118578-127.0.1.1-41837-1288970700511, infoPort=43544, ipcPort=38247)
    [junit] 2010-11-05 15:25:10,584 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_4070416229188018339_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-05 15:25:10,584 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_4070416229188018339_1001 terminating
    [junit] 2010-11-05 15:25:10,584 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:36700, storageID=DS-1997291136-127.0.1.1-36700-1288970700237, infoPort=56622, ipcPort=33515)
    [junit] 2010-11-05 15:25:10,585 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:36700, storageID=DS-1997291136-127.0.1.1-36700-1288970700237, infoPort=56622, ipcPort=33515)
    [junit] 2010-11-05 15:25:10,585 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:41837
    [junit] 2010-11-05 15:25:10,585 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-05 15:25:10,585 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_4070416229188018339_1001 src: /127.0.0.1:45362 dest: /127.0.0.1:41837
    [junit] 2010-11-05 15:25:10,586 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_4070416229188018339_1001
    [junit] 2010-11-05 15:25:10,587 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:36700
    [junit] 2010-11-05 15:25:10,587 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-05 15:25:10,587 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_4070416229188018339_1001 src: /127.0.0.1:56271 dest: /127.0.0.1:36700
    [junit] 2010-11-05 15:25:10,588 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_4070416229188018339_1001
    [junit] 2010-11-05 15:25:10,588 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:41837
    [junit] 2010-11-05 15:25:10,589 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_4070416229188018339_1002 src: /127.0.0.1:56271 dest: /127.0.0.1:36700 of size 1
    [junit] 2010-11-05 15:25:10,590 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_4070416229188018339_1002 src: /127.0.0.1:45362 dest: /127.0.0.1:41837 of size 1
    [junit] 2010-11-05 15:25:10,590 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:36700 is added to blk_4070416229188018339_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:41837|RBW], ReplicaUnderConstruction[127.0.0.1:36700|RBW], ReplicaUnderConstruction[127.0.0.1:37877|RBW]]} size 0
    [junit] 2010-11-05 15:25:10,591 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4569)) - updatePipeline(block=blk_4070416229188018339_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:41837, 127.0.0.1:36700], clientName=DFSClient_1729929599)
    [junit] 2010-11-05 15:25:10,593 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4611)) - updatePipeline(blk_4070416229188018339_1001) successfully to blk_4070416229188018339_1002
    [junit] 2010-11-05 15:25:10,594 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:41837 is added to blk_4070416229188018339_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:41837|RBW], ReplicaUnderConstruction[127.0.0.1:36700|RBW]]} size 1
    [junit] 2010-11-05 15:25:10,607 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_1729929599
    [junit] 2010-11-05 15:25:10,610 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-11-05 15:25:10,612 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:41837
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-11-05 15:25:10,613 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:41837, dest: /127.0.0.1:45364, bytes: 5, op: HDFS_READ, cliID: DFSClient_1729929599, offset: 0, srvID: DS-1134118578-127.0.1.1-41837-1288970700511, blockid: blk_4070416229188018339_1002, duration: 279376
    [junit] 2010-11-05 15:25:10,613 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:41837
    [junit] 2010-11-05 15:25:10,613 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-11-05 15:25:10,718 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 38247
    [junit] 2010-11-05 15:25:10,718 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 38247: exiting
    [junit] 2010-11-05 15:25:10,718 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 38247
    [junit] 2010-11-05 15:25:10,719 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:41837, storageID=DS-1134118578-127.0.1.1-41837-1288970700511, infoPort=43544, ipcPort=38247):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-05 15:25:10,718 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-11-05 15:25:10,718 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-05 15:25:10,720 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-05 15:25:10,720 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:41837, storageID=DS-1134118578-127.0.1.1-41837-1288970700511, infoPort=43544, ipcPort=38247):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-11-05 15:25:10,720 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 38247
    [junit] 2010-11-05 15:25:10,720 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-05 15:25:10,721 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-05 15:25:10,721 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-05 15:25:10,721 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-05 15:25:10,722 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-11-05 15:25:10,827 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 33515
    [junit] 2010-11-05 15:25:10,828 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 33515: exiting
    [junit] 2010-11-05 15:25:10,828 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-05 15:25:10,828 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-11-05 15:25:10,828 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:36700, storageID=DS-1997291136-127.0.1.1-36700-1288970700237, infoPort=56622, ipcPort=33515):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-05 15:25:10,829 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 33515
    [junit] 2010-11-05 15:25:10,831 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-05 15:25:10,831 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-05 15:25:10,831 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:36700, storageID=DS-1997291136-127.0.1.1-36700-1288970700237, infoPort=56622, ipcPort=33515):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-11-05 15:25:10,832 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 33515
    [junit] 2010-11-05 15:25:10,832 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-05 15:25:10,832 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-05 15:25:10,832 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-05 15:25:10,832 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-05 15:25:10,833 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-11-05 15:25:10,935 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 37852
    [junit] 2010-11-05 15:25:10,935 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 37852: exiting
    [junit] 2010-11-05 15:25:10,935 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 37852
    [junit] 2010-11-05 15:25:10,935 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-05 15:25:10,936 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:37877, storageID=DS-1451603870-127.0.1.1-37877-1288970699916, infoPort=42185, ipcPort=37852):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-05 15:25:10,936 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:37877, storageID=DS-1451603870-127.0.1.1-37877-1288970699916, infoPort=42185, ipcPort=37852)
    [junit] 2010-11-05 15:25:10,936 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_4070416229188018339_1001 0 : Thread is interrupted.
    [junit] 2010-11-05 15:25:10,936 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-11-05 15:25:10,936 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_4070416229188018339_1001 terminating
    [junit] 2010-11-05 15:25:10,936 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:37877, storageID=DS-1451603870-127.0.1.1-37877-1288970699916, infoPort=42185, ipcPort=37852)
    [junit] 2010-11-05 15:25:10,937 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:37877, storageID=DS-1451603870-127.0.1.1-37877-1288970699916, infoPort=42185, ipcPort=37852):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-11-05 15:25:10,939 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-05 15:25:10,941 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-05 15:25:11,039 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:37877, storageID=DS-1451603870-127.0.1.1-37877-1288970699916, infoPort=42185, ipcPort=37852):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-11-05 15:25:11,040 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 37852
    [junit] 2010-11-05 15:25:11,040 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-05 15:25:11,040 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-05 15:25:11,040 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-05 15:25:11,041 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-05 15:25:11,143 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-05 15:25:11,143 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-05 15:25:11,144 INFO namenode.FSEditLog (FSEditLog.java:printStatistics(630)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 2
    [junit] 2010-11-05 15:25:11,145 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 45388
    [junit] 2010-11-05 15:25:11,145 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 45388: exiting
    [junit] 2010-11-05 15:25:11,145 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-05 15:25:11,146 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 45388: exiting
    [junit] 2010-11-05 15:25:11,146 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 45388
    [junit] 2010-11-05 15:25:11,145 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 45388: exiting
    [junit] 2010-11-05 15:25:11,146 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 45388: exiting
    [junit] 2010-11-05 15:25:11,146 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 45388: exiting
    [junit] 2010-11-05 15:25:11,147 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 45388: exiting
    [junit] 2010-11-05 15:25:11,147 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 45388: exiting
    [junit] 2010-11-05 15:25:11,147 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 45388: exiting
    [junit] 2010-11-05 15:25:11,147 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 45388: exiting
    [junit] 2010-11-05 15:25:11,146 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 45388: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.43 sec

    checkfailure:

    run-test-hdfs-all-withtestcaseonly:

    run-test-hdfs:

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

    Total time: 230 minutes 23 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Nov 6, 2010 at 3:32 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/479/>

    ------------------------------------------
    [...truncated 783325 lines...]
    [junit] 2010-11-06 15:33:27,591 WARN hdfs.DFSClient (DFSOutputStream.java:setupPipelineForAppendOrRecovery(776)) - Error Recovery for block blk_6958453284457679588_1001 in pipeline 127.0.0.1:44662, 127.0.0.1:38927, 127.0.0.1:44296: bad datanode 127.0.0.1:44296
    [junit] 2010-11-06 15:33:27,592 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$8$9594fb70(205)) - FI: fiPipelineAck, datanode=DatanodeRegistration(127.0.0.1:44662, storageID=DS-1262001424-127.0.1.1-44662-1289057597516, infoPort=38219, ipcPort=52957)
    [junit] 2010-11-06 15:33:27,592 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-06 15:33:27,593 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-06 15:33:27,594 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_6958453284457679588_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-06 15:33:27,594 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_6958453284457679588_1001 terminating
    [junit] 2010-11-06 15:33:27,594 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:44662, storageID=DS-1262001424-127.0.1.1-44662-1289057597516, infoPort=38219, ipcPort=52957)
    [junit] 2010-11-06 15:33:27,594 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:44662, storageID=DS-1262001424-127.0.1.1-44662-1289057597516, infoPort=38219, ipcPort=52957)
    [junit] 2010-11-06 15:33:27,595 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-06 15:33:27,595 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-06 15:33:27,596 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_6958453284457679588_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at java.io.DataOutputStream.writeLong(DataOutputStream.java:207)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:542)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-06 15:33:27,596 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_6958453284457679588_1001 terminating
    [junit] 2010-11-06 15:33:27,597 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:38927, storageID=DS-457788649-127.0.1.1-38927-1289057597264, infoPort=40129, ipcPort=42111)
    [junit] 2010-11-06 15:33:27,597 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:38927, storageID=DS-457788649-127.0.1.1-38927-1289057597264, infoPort=40129, ipcPort=42111)
    [junit] 2010-11-06 15:33:27,599 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:44662
    [junit] 2010-11-06 15:33:27,599 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-06 15:33:27,599 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_6958453284457679588_1001 src: /127.0.0.1:52728 dest: /127.0.0.1:44662
    [junit] 2010-11-06 15:33:27,600 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_6958453284457679588_1001
    [junit] 2010-11-06 15:33:27,601 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:38927
    [junit] 2010-11-06 15:33:27,601 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-06 15:33:27,601 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_6958453284457679588_1001 src: /127.0.0.1:58097 dest: /127.0.0.1:38927
    [junit] 2010-11-06 15:33:27,602 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_6958453284457679588_1001
    [junit] 2010-11-06 15:33:27,602 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:44662
    [junit] 2010-11-06 15:33:27,603 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:38927 is added to blk_6958453284457679588_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:44662|RBW], ReplicaUnderConstruction[127.0.0.1:38927|RBW], ReplicaUnderConstruction[127.0.0.1:44296|RBW]]} size 0
    [junit] 2010-11-06 15:33:27,602 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_6958453284457679588_1002 src: /127.0.0.1:58097 dest: /127.0.0.1:38927 of size 1
    [junit] 2010-11-06 15:33:27,604 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_6958453284457679588_1002 src: /127.0.0.1:52728 dest: /127.0.0.1:44662 of size 1
    [junit] 2010-11-06 15:33:27,606 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:44662 is added to blk_6958453284457679588_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:44662|RBW], ReplicaUnderConstruction[127.0.0.1:38927|RBW], ReplicaUnderConstruction[127.0.0.1:44296|RBW]]} size 0
    [junit] 2010-11-06 15:33:27,607 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4569)) - updatePipeline(block=blk_6958453284457679588_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:44662, 127.0.0.1:38927], clientName=DFSClient_1579435144)
    [junit] 2010-11-06 15:33:27,609 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4611)) - updatePipeline(blk_6958453284457679588_1001) successfully to blk_6958453284457679588_1002
    [junit] 2010-11-06 15:33:27,612 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_1579435144
    [junit] 2010-11-06 15:33:27,614 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-11-06 15:33:27,616 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:44662
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-11-06 15:33:27,617 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:44662, dest: /127.0.0.1:52730, bytes: 5, op: HDFS_READ, cliID: DFSClient_1579435144, offset: 0, srvID: DS-1262001424-127.0.1.1-44662-1289057597516, blockid: blk_6958453284457679588_1002, duration: 265153
    [junit] 2010-11-06 15:33:27,617 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-11-06 15:33:27,618 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:44662
    [junit] 2010-11-06 15:33:27,719 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 52957
    [junit] 2010-11-06 15:33:27,720 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 52957: exiting
    [junit] 2010-11-06 15:33:27,720 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 52957
    [junit] 2010-11-06 15:33:27,720 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:44662, storageID=DS-1262001424-127.0.1.1-44662-1289057597516, infoPort=38219, ipcPort=52957):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-06 15:33:27,720 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-06 15:33:27,721 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-06 15:33:27,722 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-06 15:33:27,722 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:44662, storageID=DS-1262001424-127.0.1.1-44662-1289057597516, infoPort=38219, ipcPort=52957):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-11-06 15:33:27,722 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 52957
    [junit] 2010-11-06 15:33:27,722 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-06 15:33:27,723 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-06 15:33:27,723 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-06 15:33:27,723 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-06 15:33:27,724 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-11-06 15:33:27,825 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 42111
    [junit] 2010-11-06 15:33:27,830 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 42111
    [junit] 2010-11-06 15:33:27,830 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-06 15:33:27,830 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 42111: exiting
    [junit] 2010-11-06 15:33:27,830 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-11-06 15:33:27,830 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:38927, storageID=DS-457788649-127.0.1.1-38927-1289057597264, infoPort=40129, ipcPort=42111):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-06 15:33:27,833 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-06 15:33:27,833 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-06 15:33:27,834 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:38927, storageID=DS-457788649-127.0.1.1-38927-1289057597264, infoPort=40129, ipcPort=42111):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-11-06 15:33:27,834 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 42111
    [junit] 2010-11-06 15:33:27,834 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-06 15:33:27,834 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-06 15:33:27,835 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-06 15:33:27,835 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-06 15:33:27,835 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-11-06 15:33:27,937 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 59972
    [junit] 2010-11-06 15:33:27,937 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 59972: exiting
    [junit] 2010-11-06 15:33:27,937 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-06 15:33:27,938 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:44296, storageID=DS-679984939-127.0.1.1-44296-1289057596976, infoPort=40418, ipcPort=59972):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-06 15:33:27,938 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-11-06 15:33:27,938 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:44296, storageID=DS-679984939-127.0.1.1-44296-1289057596976, infoPort=40418, ipcPort=59972)
    [junit] 2010-11-06 15:33:27,937 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 59972
    [junit] 2010-11-06 15:33:27,939 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:44296, storageID=DS-679984939-127.0.1.1-44296-1289057596976, infoPort=40418, ipcPort=59972)
    [junit] 2010-11-06 15:33:27,938 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_6958453284457679588_1001 0 : Thread is interrupted.
    [junit] 2010-11-06 15:33:27,939 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_6958453284457679588_1001 terminating
    [junit] 2010-11-06 15:33:27,939 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:44296, storageID=DS-679984939-127.0.1.1-44296-1289057596976, infoPort=40418, ipcPort=59972):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-11-06 15:33:27,940 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-06 15:33:28,009 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-06 15:33:28,041 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:44296, storageID=DS-679984939-127.0.1.1-44296-1289057596976, infoPort=40418, ipcPort=59972):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-11-06 15:33:28,041 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 59972
    [junit] 2010-11-06 15:33:28,041 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-06 15:33:28,042 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-06 15:33:28,042 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-06 15:33:28,042 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-06 15:33:28,144 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-06 15:33:28,144 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-06 15:33:28,144 INFO namenode.FSEditLog (FSEditLog.java:printStatistics(630)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 4
    [junit] 2010-11-06 15:33:28,146 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 51155
    [junit] 2010-11-06 15:33:28,146 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 51155: exiting
    [junit] 2010-11-06 15:33:28,147 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 51155: exiting
    [junit] 2010-11-06 15:33:28,147 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 51155: exiting
    [junit] 2010-11-06 15:33:28,146 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 51155: exiting
    [junit] 2010-11-06 15:33:28,147 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 51155: exiting
    [junit] 2010-11-06 15:33:28,147 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 51155: exiting
    [junit] 2010-11-06 15:33:28,147 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 51155: exiting
    [junit] 2010-11-06 15:33:28,147 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 51155
    [junit] 2010-11-06 15:33:28,147 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 51155: exiting
    [junit] 2010-11-06 15:33:28,147 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-06 15:33:28,146 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 51155: exiting
    [junit] 2010-11-06 15:33:28,146 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 51155: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 75.33 sec

    checkfailure:

    run-test-hdfs-all-withtestcaseonly:

    run-test-hdfs:

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

    Total time: 238 minutes 33 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure
  • Apache Hudson Server at Nov 7, 2010 at 3:35 pm
    See <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/480/>

    ------------------------------------------
    [...truncated 767127 lines...]
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-07 15:36:39,056 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-07 15:36:39,058 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-4535220890722960074_1001 2 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:150)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-07 15:36:39,058 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 2 for block blk_-4535220890722960074_1001 terminating
    [junit] 2010-11-07 15:36:39,058 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:55802, storageID=DS-923246433-127.0.1.1-55802-1289144188473, infoPort=48995, ipcPort=59095)
    [junit] 2010-11-07 15:36:39,058 WARN datanode.DataNode (BlockReceiver.java:run(948)) - IOException in BlockReceiver.run():
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-07 15:36:39,059 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:55802, storageID=DS-923246433-127.0.1.1-55802-1289144188473, infoPort=48995, ipcPort=59095)
    [junit] 2010-11-07 15:36:39,059 WARN datanode.DataNode (DataNode.java:checkDiskError(828)) - checkDiskError: exception:
    [junit] java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] 2010-11-07 15:36:39,060 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:55802
    [junit] 2010-11-07 15:36:39,060 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-07 15:36:39,061 INFO datanode.DataNode (BlockReceiver.java:run(955)) - PacketResponder blk_-4535220890722960074_1001 1 Exception java.io.IOException: Connection reset by peer
    [junit] at sun.nio.ch.FileDispatcher.write0(Native Method)
    [junit] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:29)
    [junit] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:104)
    [junit] at sun.nio.ch.IOUtil.write(IOUtil.java:75)
    [junit] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:334)
    [junit] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:60)
    [junit] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:151)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:112)
    [junit] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:105)
    [junit] at java.io.DataOutputStream.writeShort(DataOutputStream.java:151)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Status.write(DataTransferProtocol.java:120)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$PipelineAck.write(DataTransferProtocol.java:545)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody0(BlockReceiver.java:931)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.write_aroundBody1$advice(BlockReceiver.java:160)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:931)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-07 15:36:39,061 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-4535220890722960074_1001 src: /127.0.0.1:42939 dest: /127.0.0.1:55802
    [junit] 2010-11-07 15:36:39,061 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 1 for block blk_-4535220890722960074_1001 terminating
    [junit] 2010-11-07 15:36:39,061 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-4535220890722960074_1001
    [junit] 2010-11-07 15:36:39,061 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:53784, storageID=DS-2136093556-127.0.1.1-53784-1289144188991, infoPort=47866, ipcPort=59855)
    [junit] 2010-11-07 15:36:39,062 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:53784, storageID=DS-2136093556-127.0.1.1-53784-1289144188991, infoPort=47866, ipcPort=59855)
    [junit] 2010-11-07 15:36:39,062 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp WRITE_BLOCK, datanode=127.0.0.1:53784
    [junit] 2010-11-07 15:36:39,063 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$before$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$3$3251489(73)) - FI: receiverOpWriteBlock
    [junit] 2010-11-07 15:36:39,063 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(231)) - Receiving block blk_-4535220890722960074_1001 src: /127.0.0.1:34480 dest: /127.0.0.1:53784
    [junit] 2010-11-07 15:36:39,063 INFO datanode.DataNode (FSDataset.java:recoverClose(1246)) - Recover failed close blk_-4535220890722960074_1001
    [junit] 2010-11-07 15:36:39,064 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead SUCCESS, datanode=127.0.0.1:55802
    [junit] 2010-11-07 15:36:39,064 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-4535220890722960074_1002 src: /127.0.0.1:34480 dest: /127.0.0.1:53784 of size 1
    [junit] 2010-11-07 15:36:39,065 INFO datanode.DataNode (DataXceiver.java:opWriteBlock(380)) - Received block blk_-4535220890722960074_1002 src: /127.0.0.1:42939 dest: /127.0.0.1:55802 of size 1
    [junit] 2010-11-07 15:36:39,066 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:53784 is added to blk_-4535220890722960074_1001{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:55802|RBW], ReplicaUnderConstruction[127.0.0.1:53784|RBW], ReplicaUnderConstruction[127.0.0.1:33654|RBW]]} size 0
    [junit] 2010-11-07 15:36:39,066 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4569)) - updatePipeline(block=blk_-4535220890722960074_1001, newGenerationStamp=1002, newLength=1, newNodes=[127.0.0.1:55802, 127.0.0.1:53784], clientName=DFSClient_763046362)
    [junit] 2010-11-07 15:36:39,068 INFO namenode.FSNamesystem (FSNamesystem.java:updatePipeline(4611)) - updatePipeline(blk_-4535220890722960074_1001) successfully to blk_-4535220890722960074_1002
    [junit] 2010-11-07 15:36:39,068 INFO hdfs.StateChange (BlockManager.java:addStoredBlock(1126)) - BLOCK* NameSystem.addStoredBlock: blockMap updated: 127.0.0.1:55802 is added to blk_-4535220890722960074_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[127.0.0.1:55802|RBW], ReplicaUnderConstruction[127.0.0.1:53784|RBW]]} size 1
    [junit] 2010-11-07 15:36:39,071 INFO hdfs.StateChange (FSNamesystem.java:completeFileInternal(1713)) - DIR* NameSystem.completeFile: file /pipeline_Fi_38/foo is closed by DFSClient_763046362
    [junit] 2010-11-07 15:36:39,074 INFO FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson ip=/127.0.0.1 cmd=open src=/pipeline_Fi_38/foo dst=null perm=null
    [junit] 2010-11-07 15:36:39,075 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$1$8f59fdd7(51)) - FI: receiverOp READ_BLOCK, datanode=127.0.0.1:55802
    [junit] Shutting down the Mini HDFS Cluster
    [junit] 2010-11-07 15:36:39,077 INFO DataNode.clienttrace (BlockSender.java:sendBlock(490)) - src: /127.0.0.1:55802, dest: /127.0.0.1:42941, bytes: 5, op: HDFS_READ, cliID: DFSClient_763046362, offset: 0, srvID: DS-923246433-127.0.1.1-55802-1289144188473, blockid: blk_-4535220890722960074_1002, duration: 274102
    [junit] 2010-11-07 15:36:39,077 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 2
    [junit] 2010-11-07 15:36:39,077 INFO datanode.DataTransferProtocolAspects (DataTransferProtocolAspects.aj:ajc$afterReturning$org_apache_hadoop_hdfs_server_datanode_DataTransferProtocolAspects$2$d4f6605f(61)) - FI: statusRead CHECKSUM_OK, datanode=127.0.0.1:55802
    [junit] 2010-11-07 15:36:39,179 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 59855
    [junit] 2010-11-07 15:36:39,179 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 59855: exiting
    [junit] 2010-11-07 15:36:39,179 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-07 15:36:39,179 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-11-07 15:36:39,179 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 59855
    [junit] 2010-11-07 15:36:39,180 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:53784, storageID=DS-2136093556-127.0.1.1-53784-1289144188991, infoPort=47866, ipcPort=59855):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-07 15:36:39,182 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-07 15:36:39,182 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-07 15:36:39,183 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:53784, storageID=DS-2136093556-127.0.1.1-53784-1289144188991, infoPort=47866, ipcPort=59855):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data5/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data6/current/finalized'}>
    [junit] 2010-11-07 15:36:39,183 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 59855
    [junit] 2010-11-07 15:36:39,183 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-07 15:36:39,183 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-07 15:36:39,184 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-07 15:36:39,184 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-07 15:36:39,184 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 1
    [junit] 2010-11-07 15:36:39,286 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 33271
    [junit] 2010-11-07 15:36:39,286 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 33271: exiting
    [junit] 2010-11-07 15:36:39,286 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 33271
    [junit] 2010-11-07 15:36:39,286 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-07 15:36:39,287 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:33654, storageID=DS-814343497-127.0.1.1-33654-1289144188743, infoPort=55049, ipcPort=33271)
    [junit] 2010-11-07 15:36:39,287 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2010-11-07 15:36:39,287 INFO datanode.DataNode (BlockReceiver.java:run(886)) - PacketResponder blk_-4535220890722960074_1001 0 : Thread is interrupted.
    [junit] 2010-11-07 15:36:39,288 INFO datanode.DataNode (BlockReceiver.java:run(971)) - PacketResponder 0 for block blk_-4535220890722960074_1001 terminating
    [junit] 2010-11-07 15:36:39,287 INFO datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:33654, storageID=DS-814343497-127.0.1.1-33654-1289144188743, infoPort=55049, ipcPort=33271)
    [junit] 2010-11-07 15:36:39,289 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 2
    [junit] 2010-11-07 15:36:39,287 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:33654, storageID=DS-814343497-127.0.1.1-33654-1289144188743, infoPort=55049, ipcPort=33271):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-07 15:36:39,290 ERROR datanode.DataNode (DataXceiver.java:run(116)) - DatanodeRegistration(127.0.0.1:33654, storageID=DS-814343497-127.0.1.1-33654-1289144188743, infoPort=55049, ipcPort=33271):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:363)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:114)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] at java.lang.Thread.sleep(Native Method)
    [junit] at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] ... 11 more
    [junit] 2010-11-07 15:36:39,293 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-07 15:36:39,293 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-07 15:36:39,294 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:33654, storageID=DS-814343497-127.0.1.1-33654-1289144188743, infoPort=55049, ipcPort=33271):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}>
    [junit] 2010-11-07 15:36:39,294 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 33271
    [junit] 2010-11-07 15:36:39,294 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-07 15:36:39,294 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-07 15:36:39,295 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-07 15:36:39,295 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-07 15:36:39,295 INFO hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(772)) - Shutting down DataNode 0
    [junit] 2010-11-07 15:36:39,397 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 59095
    [junit] 2010-11-07 15:36:39,397 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 59095: exiting
    [junit] 2010-11-07 15:36:39,397 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-07 15:36:39,397 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-11-07 15:36:39,397 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 59095
    [junit] 2010-11-07 15:36:39,398 WARN datanode.DataNode (DataXceiverServer.java:run(140)) - DatanodeRegistration(127.0.0.1:55802, storageID=DS-923246433-127.0.1.1-55802-1289144188473, infoPort=48995, ipcPort=59095):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:133)
    [junit] at java.lang.Thread.run(Thread.java:619)
    [junit]
    [junit] 2010-11-07 15:36:39,400 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-07 15:36:39,497 INFO datanode.DataBlockScanner (DataBlockScanner.java:run(621)) - Exiting DataBlockScanner thread.
    [junit] 2010-11-07 15:36:39,501 INFO datanode.DataNode (DataNode.java:run(1443)) - DatanodeRegistration(127.0.0.1:55802, storageID=DS-923246433-127.0.1.1-55802-1289144188473, infoPort=48995, ipcPort=59095):Finishing DataNode in: FSDataset{dirpath='<https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}>
    [junit] 2010-11-07 15:36:39,501 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 59095
    [junit] 2010-11-07 15:36:39,501 INFO datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-11-07 15:36:39,501 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-11-07 15:36:39,501 INFO datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-11-07 15:36:39,502 WARN datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-11-07 15:36:39,604 WARN namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-07 15:36:39,604 INFO namenode.FSEditLog (FSEditLog.java:printStatistics(630)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 3
    [junit] 2010-11-07 15:36:39,604 WARN namenode.FSNamesystem (FSNamesystem.java:run(2818)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-11-07 15:36:39,605 INFO ipc.Server (Server.java:stop(1601)) - Stopping server on 59407
    [junit] 2010-11-07 15:36:39,606 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 59407: exiting
    [junit] 2010-11-07 15:36:39,606 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 59407: exiting
    [junit] 2010-11-07 15:36:39,606 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 59407: exiting
    [junit] 2010-11-07 15:36:39,606 INFO ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 59407
    [junit] 2010-11-07 15:36:39,606 INFO ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-11-07 15:36:39,607 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 59407: exiting
    [junit] 2010-11-07 15:36:39,608 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 59407: exiting
    [junit] 2010-11-07 15:36:39,606 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 59407: exiting
    [junit] 2010-11-07 15:36:39,607 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 59407: exiting
    [junit] 2010-11-07 15:36:39,607 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 59407: exiting
    [junit] 2010-11-07 15:36:39,607 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 59407: exiting
    [junit] 2010-11-07 15:36:39,606 INFO ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 59407: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 67.86 sec

    checkfailure:

    run-test-hdfs-all-withtestcaseonly:

    run-test-hdfs:

    BUILD FAILED
    <https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/ws/trunk/build.xml>:708: Tests failed!

    Total time: 241 minutes 44 seconds
    Publishing Javadoc
    Archiving artifacts
    Recording test results
    Recording fingerprints
    Publishing Clover coverage report...
    No Clover report will be published due to a Build Failure

Related Discussions

Discussion Navigation
viewthread | post
Discussion Overview
group: hdfs-dev @
categories: hadoop
posted: Oct 22, '10 at 1:01a
active: Nov 7, '10 at 3:35p
posts: 18
users: 1
website: hadoop.apache.org...
irc: #hadoop

1 user in discussion

Apache Hudson Server: 18 posts

People

Translate

site design / logo © 2023 Grokbase