Hi,

Platform: Ubuntu
Hadoop & HBase from CDH3

Tool: NetBeans 6.0.9

I am trying to write an HBase batch import (bulk load) job; I am new to HBase
and Hadoop.
Can anyone show me an example, or point me to a link with more information?
I have tried the code below. Please look it over and point out any errors; I
would be thankful to you.


import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class NewMain {

    // Each input line carries four whitespace-separated fields:
    // row id, column family, column qualifier, value.
    public static class Map
            extends Mapper<LongWritable, Text, ImmutableBytesWritable, KeyValue> {

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer tokenizer = new StringTokenizer(value.toString());
            if (tokenizer.countTokens() < 4) {
                return; // skip blank or malformed lines
            }
            // Read each field exactly once. The original called nextToken()
            // several extra times (including inside System.out.println), so
            // the wrong tokens ended up in the row key and the KeyValue.
            byte[] row = Bytes.toBytes(tokenizer.nextToken());
            byte[] family = Bytes.toBytes(tokenizer.nextToken());
            byte[] qualifier = Bytes.toBytes(tokenizer.nextToken());
            byte[] cellValue = Bytes.toBytes(tokenizer.nextToken());

            context.write(new ImmutableBytesWritable(row),
                    new KeyValue(row, family, qualifier, cellValue));
        }
    }

    // No hand-written reducer is needed: HFileOutputFormat.configureIncrementalLoad()
    // installs HBase's own KeyValueSortReducer and a TotalOrderPartitioner.
    // The original KeyValueSortReducer summed IntWritables, which does not
    // match the (ImmutableBytesWritable, KeyValue) map output, and it was
    // never set on the job anyway.

    public static void main(String[] args) throws Exception {
        // One HBase-aware Configuration for one Job. The original built a
        // second Job ("jobh") from the HBase configuration but then configured
        // and ran the first Job, which only had a plain Configuration.
        Configuration conf = HBaseConfiguration.create();
        Job job = new Job(conf, "Member Claims Schema type 2 Load Job Optimisation 1");
        job.setJarByClass(NewMain.class);

        // The target table must already exist in HBase; its region boundaries
        // drive the partitioning of the generated HFiles.
        HTable htable = new HTable(conf, "MemberDemographicsClaim");
        //htable.setAutoFlush(false);

        job.setMapperClass(Map.class);
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(KeyValue.class);
        job.setInputFormatClass(TextInputFormat.class);

        // Sets the output format, the reducer, and the total-order partitioner
        // (whose partitions file is the _partition.lst in the error below).
        HFileOutputFormat.configureIncrementalLoad(job, htable);

        // These paths are resolved against the job's default filesystem; on a
        // cluster they need to be HDFS paths, not local ones.
        Path p1 = new Path("/root/NetBeansProjects/ILHbase/InputFiles");
        Path p2 = new Path("/root/NetBeansProjects/ILHbase/OutputFiles");
        FileInputFormat.addInputPath(job, p1);
        FileOutputFormat.setOutputPath(job, p2);

        job.waitForCompletion(true);
    }
}
My input file looks like this:

RowId  Column Family  Column   Value
1      demographics   name     N
2      demographics   address  MMM
3      claim          c1       A
4      claim          c2       B

Errors that I am getting with this code:

- java.lang.IllegalArgumentException: Can't read partitions file
- Caused by: java.io.FileNotFoundException: File _partition.lst does not
  exist.
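
For completeness: HFileOutputFormat only writes HFiles under the output
directory; a separate step still has to move them into the table. Below is a
minimal sketch of that step, assuming the CDH3-era LoadIncrementalHFiles API
(a constructor taking a Configuration and a doBulkLoad(Path, HTable) method);
the class name CompleteBulkLoad is just for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class CompleteBulkLoad {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable htable = new HTable(conf, "MemberDemographicsClaim");
        // The HFileOutputFormat output directory of the finished job above.
        Path hfiles = new Path("/root/NetBeansProjects/ILHbase/OutputFiles");
        // Moves the generated HFiles into the table's regions.
        new LoadIncrementalHFiles(conf).doBulkLoad(hfiles, htable);
    }
}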


  • Stack at Sep 7, 2011 at 3:25 pm
    Seems like an issue with the paths you are using:

    - java.lang.IllegalArgumentException: Can't read partitions file
    - Caused by: java.io.FileNotFoundException: File _partition.lst does not
    exist.

    Perhaps it's using the local filesystem while the partitions file is up in
    HDFS? Change the file spec, or make sure your configuration is pointing
    at HDFS as the filesystem to use.

    St.Ack
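
    A minimal sketch of that second suggestion, setting HDFS as the job's
    default filesystem before building the Job. The address
    hdfs://namenode:8020 is a placeholder for the real NameNode;
    fs.default.name is the Hadoop 0.20 / CDH3 configuration key.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.mapreduce.Job;

    public class HdfsAwareJob {
        public static Job build() throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // With HDFS as the default filesystem, the partitions file that
            // configureIncrementalLoad() writes is created and read on HDFS,
            // not on the local filesystem where the tasks cannot find it.
            conf.set("fs.default.name", "hdfs://namenode:8020");
            return new Job(conf, "Hbase bulk load");
        }
    }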
  • Jean-Daniel Cryans at Sep 7, 2011 at 4:20 pm
    Same answer as last time this was asked:

    http://search-hadoop.com/m/z1aDB4my9g2

    J-D
