Error while trying to start Hadoop on Ubuntu Lucid for the first time

  • Sean wagner at Aug 26, 2011 at 12:53 am
    Can anyone offer me some insight? It may have been due to me trying to run the start-all.sh script instead of starting the services; I'm not sure.

    Thanks
    Sean



    /************************************************************
    STARTUP_MSG: Starting NameNode
    STARTUP_MSG:   host = ubuntu-mogile-1/127.0.1.1
    STARTUP_MSG:   args = []
    STARTUP_MSG:   version = 0.20.2-cdh3u1
    STARTUP_MSG:   build = file:///tmp/nightly_2011-07-18_07-57-52_3/hadoop-0.20-0.20.2+923.97-1~lucid -r bdafb1dbffd0d5f2fbc6ee022e1c8df6500fd638; compiled by 'root' on Mon Jul 18 09:40:01 PDT 2011
    ************************************************************/
    2011-08-25 17:16:48,653 INFO org.apache.hadoop.metrics.jvm.JvmMetrics: Initializing JVM Metrics with processName=NameNode, sessionId=null
    2011-08-25 17:16:48,655 INFO org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics: Initializing NameNodeMeterics using context object:org.apache.hadoop.metrics.spi.NoEmitMetricsContext
    2011-08-25 17:16:48,664 INFO org.apache.hadoop.hdfs.util.GSet: VM type       = 64-bit
    2011-08-25 17:16:48,664 INFO org.apache.hadoop.hdfs.util.GSet: 2% max memory = 17.77875 MB
    2011-08-25 17:16:48,665 INFO org.apache.hadoop.hdfs.util.GSet: capacity      = 2^21 = 2097152 entries
    2011-08-25 17:16:48,665 INFO org.apache.hadoop.hdfs.util.GSet: recommended=2097152, actual=2097152
    2011-08-25 17:16:48,676 ERROR org.apache.hadoop.hdfs.server.namenode.FSNamesystem: FSNamesystem initialization failed.
    java.io.IOException: Expecting a line not the end of stream
    at org.apache.hadoop.fs.DF.parseExecResult(DF.java:117)
    at org.apache.hadoop.util.Shell.runCommand(Shell.java:237)
    at org.apache.hadoop.util.Shell.run(Shell.java:182)
    at org.apache.hadoop.fs.DF.getFilesystem(DF.java:63)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.addDirsToCheck(NameNodeResourceChecker.java:87)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.<init>(NameNodeResourceChecker.java:71)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.initialize(FSNamesystem.java:348)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:327)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:271)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:465)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1224)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1233)
    2011-08-25 17:16:48,678 ERROR org.apache.hadoop.hdfs.server.namenode.NameNode: java.lang.NullPointerException
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.close(FSNamesystem.java:560)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:330)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:271)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:465)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1224)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1233)

    2011-08-25 17:16:48,678 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: SHUTDOWN_MSG:
    /************************************************************
    SHUTDOWN_MSG: Shutting down NameNode at ubuntu-mogile-1/127.0.1.1
    ************************************************************/



    20$ sudo ls -lR
    .:
    total 4
    drwxrwxrwx 4 root root 4096 2011-08-25 17:15 cache

    ./cache:
    total 8
    drwxr-xr-x 3 hdfs   hdfs   4096 2011-08-25 17:15 hdfs
    drwxr-xr-x 3 mapred mapred 4096 2011-08-25 17:15 mapred

    ./cache/hdfs:
    total 4
    drwxr-xr-x 3 hdfs hdfs 4096 2011-08-25 17:15 dfs

    ./cache/hdfs/dfs:
    total 4
    drwx------ 2 hdfs hdfs 4096 2011-08-25 17:15 data

    ./cache/hdfs/dfs/data:
    total 0

    ./cache/mapred:
    total 4
    drwxr-xr-x 3 mapred mapred 4096 2011-08-25 17:15 mapred

    ./cache/mapred/mapred:
    total 4
    drwxr-xr-x 6 mapred mapred 4096 2011-08-25 17:16 local

    ./cache/mapred/mapred/local:
    total 16
    drwxr-xr-x 2 mapred mapred 4096 2011-08-25 17:16 taskTracker
    drwxr-xr-x 2 mapred mapred 4096 2011-08-25 17:16 toBeDeleted
    drwxr-xr-x 2 mapred mapred 4096 2011-08-25 17:16 tt_log_tmp
    drwx------ 2 mapred mapred 4096 2011-08-25 17:16 ttprivate

    ./cache/mapred/mapred/local/taskTracker:
    total 0

    ./cache/mapred/mapred/local/toBeDeleted:
    total 0

    ./cache/mapred/mapred/local/tt_log_tmp:
    total 0

    ./cache/mapred/mapred/local/ttprivate:
    total 0
  • Harsh J at Aug 26, 2011 at 3:06 am
    Hello Sean,

    Welcome to the hadoop mailing lists, and thanks for asking your
    question supplied with good data!

    Moving this to cdh-user@cloudera.org list as you're using the CDH3
    version of Apache Hadoop. (bcc'd common-user@hadoop.apache.org and
    cc'd you)

    Some questions below:

    - Can you run "sudo -u hdfs df -kh /var/lib/hadoop-0.20/cache/dfs"?
    What does the output carry?
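
    Background on why that command is diagnostic: Hadoop's
    org.apache.hadoop.fs.DF helper shells out to "df -k <dir>" and parses
    the command's stdout; if the directory does not exist, df prints its
    complaint only to stderr and nothing to stdout, so DF.parseExecResult
    hits end-of-stream and throws the "Expecting a line not the end of
    stream" IOException seen in the log above. A quick manual check,
    assuming the CDH3 default path:

    $ sudo -u hdfs df -k /var/lib/hadoop-0.20/cache/dfs
    # An existing directory prints a filesystem line on stdout; a missing
    # one yields only an error on stderr, which is what DF trips over.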
    On Fri, Aug 26, 2011 at 6:23 AM, sean wagner wrote:
    [quoted message trimmed]


    --
    Harsh J
  • Marcos Luis Ortiz Valmaseda at Aug 26, 2011 at 3:25 am
    Hello, Sean. Can you describe how you initialize the HDFS service?

    2011/8/25, Harsh J <harsh@cloudera.com>:
    [quoted message trimmed]

    --
    Marcos Luis Ortíz Valmaseda
    Software Engineer (UCI)
    Linux User # 418229
    http://marcosluis2186.posterous.com
    http://www.linkedin.com/in/marcosluis2186
    https://fedoraproject.org/wiki/User:Marcosluis
  • Sean wagner at Aug 27, 2011 at 4:28 pm
    To start Hadoop, I do:


    $ for service in /etc/init.d/hadoop-0.20-*
    do
    sudo $service start
    done

    running "sudo -u hdfs df -kh /var/lib/hadoop-0.20/cache/dfs"

    df: `/var/lib/hadoop-0.20/cache/dfs': No such file or directory
    df: no file systems processed

    core-site.xml:


    <?xml version="1.0"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

    <configuration>
    <property>
    <name>fs.default.name</name>
    <value>hdfs://localhost:8020</value>
    </property>

    <property>
    <name>hadoop.tmp.dir</name>
    <value>/var/lib/hadoop-0.20/cache/${user.name}</value>
    </property>

    <!-- OOZIE proxy user setting -->
    <property>
    <name>hadoop.proxyuser.oozie.hosts</name>
    <value>*</value>
    </property>
    <property>
    <name>hadoop.proxyuser.oozie.groups</name>

    <value>*</value>
    </property>

    </configuration>

    hdfs-site.xml:


    <?xml version="1.0"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

    <configuration>
    <property>
    <name>dfs.replication</name>
    <value>1</value>
    </property>
    <property>
    <name>dfs.permissions</name>
    <value>false</value>
    </property>
    <property>
    <!-- specify this so that running 'hadoop namenode -format' formats the right dir -->
    <name>dfs.name.dir</name>
    <value>/var/lib/hadoop-0.20/cache/hadoop/dfs/name</value>

    </property>

    <!-- Enable Hue Plugins -->
    <property>
    <name>dfs.namenode.plugins</name>
    <value>org.apache.hadoop.thriftfs.NamenodePlugin</value>
    <description>Comma-separated list of namenode plug-ins to be activated.
    </description>
    </property>
    <property>
    <name>dfs.datanode.plugins</name>
    <value>org.apache.hadoop.thriftfs.DatanodePlugin</value>
    <description>Comma-separated list of datanode plug-ins to be activated.
    </description>
    </property>
    <property>
    <name>dfs.thrift.address</name>
    <value>0.0.0.0:10090</value>

    </property>
    </configuration>


    ________________________________
    From: Harsh J <harsh@cloudera.com>
    To: cdh-user@cloudera.org
    Cc: sean wagner <wagz211@yahoo.com>
    Sent: Friday, August 26, 2011 12:28 AM
    Subject: Re: Error while trying to start Hadoop on Ubuntu Lucid for the first time.

    Also, could you paste the contents of your hdfs-site.xml and
    core-site.xml? I believe I have reproduced this issue and found a
    solution, and would like to confirm.
    On Fri, Aug 26, 2011 at 8:35 AM, Harsh J wrote:
    [quoted message trimmed]


    --
    Harsh J
  • Sean wagner at Aug 29, 2011 at 7:16 am
    Harsh, did you have a solution for this?



    ________________________________
    From: sean wagner <wagz211@yahoo.com>
    To: "common-user@hadoop.apache.org" <common-user@hadoop.apache.org>
    Sent: Saturday, August 27, 2011 9:27 AM
    Subject: Re: Error while trying to start Hadoop on Ubuntu Lucid for the first time.

    [quoted message trimmed]
  • Harsh J at Aug 29, 2011 at 11:20 am
    Hello,

    Sorry for the late response. It looks like you haven't formatted your
    NameNode (comparing your ls output against your hdfs-site.xml). The
    dfs.name.dir does not exist when the NameNode tries to check it, and
    that is why startup fails.

    Have you done the "hadoop namenode -format" step of the setup yet?
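
    For anyone following along, a minimal sketch of that step under the
    CDH3 packaging, where the HDFS daemons run as the dedicated hdfs user
    (the directory it creates comes from dfs.name.dir in hdfs-site.xml):

    # Format as the hdfs user so dfs.name.dir ends up owned by the
    # account the NameNode daemon runs under.
    $ sudo -u hdfs hadoop namenode -format

    # Then start the services again.
    $ for service in /etc/init.d/hadoop-0.20-*; do sudo $service start; done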
    On Mon, Aug 29, 2011 at 12:44 PM, sean wagner wrote:
    [quoted message trimmed]


    --
    Harsh J
  • Sean wagner at Aug 29, 2011 at 5:29 pm
    OK, I did a format using a random user account. That screwed up permissions so that it wouldn't start. Setting rwx for everyone on /var/lib/hadoop-0.20 made it start successfully. Which user account was I supposed to run the format as?

    Thanks
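
    A hedged cleanup sketch: under the CDH3 packages the HDFS daemons run
    as the hdfs user, so the format should be run as hdfs, and restoring
    ownership is safer than granting rwx to everyone. Paths are taken
    from the configs quoted earlier in the thread:

    # Re-own the name directory created by the accidental format.
    $ sudo chown -R hdfs:hdfs /var/lib/hadoop-0.20/cache/hadoop

    # For future formats, run them as the hdfs user.
    $ sudo -u hdfs hadoop namenode -format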



    ________________________________
    From: Harsh J <harsh@cloudera.com>
    To: common-user@hadoop.apache.org; sean wagner <wagz211@yahoo.com>
    Sent: Monday, August 29, 2011 4:18 AM
    Subject: Re: Error while trying to start Hadoop on Ubuntu Lucid for the first time.

    [quoted message trimmed]

Discussion Overview
group: common-user
categories: hadoop
posted: Aug 26, '11 at 12:53a
active: Aug 29, '11 at 5:29p
posts: 8
users: 3
website: hadoop.apache.org...
irc: #hadoop
