December 27, 2023

Distributed Cache in Hadoop

In this post we’ll see what distributed cache in Hadoop is and how to use it.

What is a distributed cache

As the name suggests, distributed cache in Hadoop is a cache where you can store files (text files, archives, jars etc.) which are then distributed across the nodes where the mappers and reducers for the MapReduce job are running. That way the cached files are localized for the running map and reduce tasks.

Methods for adding the files in Distributed Cache

There is a DistributedCache class with the relevant methods, but the whole class is deprecated in Hadoop 2. You should use the methods in the Job class instead.

  • public void addCacheFile(URI uri) - Adds a file to be localized.
  • public void addCacheArchive(URI uri) - Adds an archive to be localized.
  • public void addFileToClassPath(Path file) - Adds a file path to the current set of classpath entries. It adds the file to the cache as well. Files added with this method will not be unpacked while being added to the classpath.
  • public void addArchiveToClassPath(Path archive) - Adds an archive path to the current set of classpath entries. It adds the archive to the cache as well. Archives will be unpacked and added to the classpath when being distributed.

How to use distributed cache

In order to make a file available through distributed cache in Hadoop you need to do the following:

  1. Copy the file you want to make available through distributed cache to HDFS if it is not there already.
  2. Based on the file type use the relevant method to add it to distributed cache.

As an example, if you want to add a text file to the distributed cache you can use the following statement in your driver class. The name after the # sign is the symlink name with which the file can be accessed in the mapper or reducer.

job.addCacheFile(new URI("/user/input/test.txt#test"));

If you want to add a jar to the classpath then you can do it as follows-

job.addFileToClassPath(new Path("/myapp/mylib.jar"));
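
Similarly, if you want to make an archive (zip, tar, tgz etc.) available through distributed cache you can use the addCacheArchive() method; the archive will be unpacked on the nodes where the tasks run. The archive path and the symlink name used here are just placeholders for illustration.

job.addCacheArchive(new URI("/user/input/thirdparty.zip#thirdparty"));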

Distributed cache example MapReduce code

Here is an Avro MapReduce word count example program. The output file is an Avro data file which uses an Avro schema. This Avro schema is added to the distributed cache using the addCacheFile() method and is read by the mappers and reducers.
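
A minimal wcschema.avsc along these lines would work; the field names word and count match the record.put() calls in the code, and the file is assumed to be stored in HDFS at /user/input/wcschema.avsc as used in the driver.

{
  "type": "record",
  "name": "wcschema",
  "fields": [
    {"name": "word", "type": "string"},
    {"name": "count", "type": "int"}
  ]
}

The MapReduce code that uses this cached schema is given below.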

import java.io.File;
import java.io.IOException;
import java.net.URI;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.avro.mapreduce.AvroJob;
import org.apache.avro.mapreduce.AvroKeyOutputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class AvroWordCount extends Configured implements Tool{
	
  // Map function
  public static class AvroWordMapper extends Mapper<LongWritable, Text, AvroKey<Text>, AvroValue<GenericRecord>>{
    private Text word = new Text();
    private GenericRecord record;
     
    @Override
    protected void setup(Context context)
        throws IOException, InterruptedException {
      // The schema file added to the distributed cache is read here
      // using its symlink name (wcschema)
      Schema AVRO_SCHEMA = new Schema.Parser().parse(new File("./wcschema"));
      record = new GenericData.Record(AVRO_SCHEMA);
    }
    @Override
    public void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
      // Splitting the line on spaces
      String[] stringArr = value.toString().split("\\s+");
      for (String str : stringArr) {
        word.set(str);
        // creating Avro record
        record.put("word", str);
        record.put("count", 1);
        context.write(new AvroKey<Text>(word), new AvroValue<GenericRecord>(record));
      }
    }
  }
	
  // Reduce function
  public static class AvroWordReducer extends Reducer<AvroKey<Text>, AvroValue<GenericRecord>,
      AvroKey<GenericRecord>, NullWritable>{
    Schema AVRO_SCHEMA;
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
      // The schema file added to the distributed cache is read here
      // using its symlink name (wcschema)
      AVRO_SCHEMA = new Schema.Parser().parse(new File("./wcschema"));
    }
    @Override
    public void reduce(AvroKey<Text> key, Iterable<AvroValue<GenericRecord>> values, Context context)
        throws IOException, InterruptedException {
      int sum = 0;
      for (AvroValue<GenericRecord> value : values) {
        GenericRecord record = value.datum();
        sum += (Integer)record.get("count");
      }
      GenericRecord record = new GenericData.Record(AVRO_SCHEMA);
      record.put("word", key.datum());
      record.put("count", sum);
      context.write(new AvroKey<GenericRecord>(record), NullWritable.get());
    }
  }

  public static void main(String[] args) throws Exception{
    int exitFlag = ToolRunner.run(new AvroWordCount(), args);
    System.exit(exitFlag);
  }
	
  @Override
  public int run(String[] args) throws Exception {
    // Use the Configuration set by ToolRunner so -D options are picked up
    Configuration conf = getConf();
    Job job = Job.getInstance(conf, "AvroWC");
    job.setJarByClass(getClass());
    job.setMapperClass(AvroWordMapper.class);    
    job.setReducerClass(AvroWordReducer.class);
    // Name after the # sign in the file location
    // will be used as the file name in Mapper/Reducer
    job.addCacheFile(new URI("/user/input/wcschema.avsc#wcschema"));
    AvroJob.setMapOutputKeySchema(job, Schema.create(Schema.Type.STRING));
    FileSystem fs = FileSystem.get(conf);
    // The schema file stored in HDFS is also read here in the driver
    Path path = new Path("/user/input/wcschema.avsc");
    Schema sc = new Schema.Parser().parse(fs.open(path));
    AvroJob.setMapOutputValueSchema(job, sc);
    AvroJob.setOutputKeySchema(job, sc);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(AvroKeyOutputFormat.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    return job.waitForCompletion(true) ? 0 : 1;
  }
}
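
The mapper and reducer above read the cached file using the symlink name (wcschema) created because of the # fragment in the cache file URI. If you also want to see which files were localized for a task you can query the task context using the getCacheFiles() method. Here is a sketch of how the mapper's setup() method could do that (it reuses the fields and imports of the example above) before reading the schema.

@Override
protected void setup(Context context)
    throws IOException, InterruptedException {
  // URIs of all the files added with job.addCacheFile() are available from the context
  URI[] cacheFiles = context.getCacheFiles();
  if (cacheFiles != null) {
    for (URI cacheFile : cacheFiles) {
      System.out.println("File localized through distributed cache - " + cacheFile);
    }
  }
  // The cached schema file is still read using its symlink name
  Schema schema = new Schema.Parser().parse(new File("./wcschema"));
  record = new GenericData.Record(schema);
}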

That's all for the topic Distributed Cache in Hadoop. If something is missing or you have something to share about the topic, please write a comment.

