
Simple Hadoop program throws ClassNotFoundException

Recently I rewrote the code of the WordCount example from Hadoop, but when I run it on my virtual machine (Ubuntu Server 14.04 with both Hadoop and Java set up), I get a ClassNotFoundException ... I have already tried many solutions found on the Internet, but they didn't work. Is there anything I can do to fix this?

My code is:

        package org.apache.hadoop.examples;

        import java.io.IOException;
        import java.util.StringTokenizer;

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.fs.Path;
        import org.apache.hadoop.io.FloatWritable;
        import org.apache.hadoop.io.IntWritable;
        import org.apache.hadoop.io.Text;
        import org.apache.hadoop.mapreduce.Job;
        import org.apache.hadoop.mapreduce.Mapper;
        import org.apache.hadoop.mapreduce.Reducer;
        import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
        import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
        import org.apache.hadoop.util.GenericOptionsParser;

        public class myhadoop {

            // Note: a static counter is only visible to both mapper and reducer when
            // they run in the same JVM (local mode); in a distributed job each task
            // gets its own copy, so the reducer would see 0 here.
            public static int total_number = 0;

            public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {

                private final static IntWritable one = new IntWritable(1);
                private Text word = new Text();

                public void map(Object key, Text value, Context context)
                        throws IOException, InterruptedException {
                    StringTokenizer itr = new StringTokenizer(value.toString());
                    while (itr.hasMoreTokens()) {
                        word.set(itr.nextToken());
                        context.write(word, one);
                        total_number = total_number + 1;
                    }
                }
            }

            public static class IntSumCombiner extends Reducer<Text, IntWritable, Text, IntWritable> {

                private IntWritable result = new IntWritable();

                public void reduce(Text key, Iterable<IntWritable> values, Context context)
                        throws IOException, InterruptedException {
                    int sum = 0;
                    for (IntWritable val : values) {
                        sum += val.get();
                    }
                    result.set(sum);
                    context.write(key, result);
                }
            }

            public static class ResultCountReducer extends Reducer<Text, IntWritable, Text, FloatWritable> {

                private FloatWritable result = new FloatWritable();

                public void reduce(Text key, Iterable<IntWritable> values, Context context)
                        throws IOException, InterruptedException {
                    int sum = 0;
                    for (IntWritable val : values) {
                        sum += val.get();
                    }
                    // Cast before dividing: plain int division truncates to 0
                    // whenever sum < total_number.
                    float frequency = (float) sum / total_number;
                    result.set(frequency);
                    context.write(key, result);
                }
            }

            public static void main(String[] args) throws Exception {
                Configuration conf = new Configuration();
                String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
                if (otherArgs.length != 2) {
                    System.err.println("Usage: myhadoop <in> <out>");
                    System.exit(2);
                }
                Job job = new Job(conf, "myhadoop");
                job.setJarByClass(myhadoop.class);
                job.setMapperClass(TokenizerMapper.class);
                job.setCombinerClass(IntSumCombiner.class);
                job.setReducerClass(ResultCountReducer.class);
                // The map output types (Text/IntWritable) differ from the job output
                // types (Text/FloatWritable), so they must be declared explicitly.
                job.setMapOutputKeyClass(Text.class);
                job.setMapOutputValueClass(IntWritable.class);
                job.setOutputKeyClass(Text.class);
                job.setOutputValueClass(FloatWritable.class);
                FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
                FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
                System.exit(job.waitForCompletion(true) ? 0 : 1);
            }
        }
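
For context, a ClassNotFoundException at launch usually means the class name passed to the hadoop jar command does not match what is actually in the jar. Since the class above is declared inside the package org.apache.hadoop.examples, it has to be invoked by its fully qualified name; a typical invocation would look like this (the jar name myhadoop.jar and the two paths are placeholders):

        hadoop jar myhadoop.jar org.apache.hadoop.examples.myhadoop <in> <out>

Invoking it as plain myhadoop while the package declaration is still present is one common way to trigger exactly this exception.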

Solution from the comments: delete the first line, i.e. the package declaration

'package org.apache.hadoop.examples;'

and in the driver code replace

job.setJarByClass()

with

job.setJar()
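
A minimal sketch of the changed driver line, assuming the job was packaged as myhadoop.jar in the directory the job is launched from (the jar name is a placeholder):

        // Point the job at an explicit jar file on the local filesystem instead
        // of letting Hadoop search the classpath for the jar containing this class.
        job.setJar("myhadoop.jar");

With the package declaration removed as well, the class can then be run by its bare name, e.g. hadoop jar myhadoop.jar myhadoop <in> <out>.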
