
Hadoop 15

2013-08-22

Before a TaskTracker runs a task it must first localize the job: the job jar and any files the job placed in the distributed cache are pulled from HDFS onto the TaskTracker's local disk, and a TaskRunner is created for the task; the TaskRunner then starts a separate child JVM in which the task actually executes. The flow begins in TaskTracker.offerService(): the TaskTracker heartbeats to the JobTracker, and when a heartbeat response carries a LaunchTaskAction, addToTaskQueue() hands the task to the appropriate launcher thread, map tasks to mapLauncher and reduce tasks to reduceLauncher (both are TaskLauncher instances). TaskLauncher.run() pulls a TaskInProgress off its queue and calls startNewTask(TaskInProgress tip), which localizes the job with localizeJob(TaskInProgress tip).

startNewTask() then calls launchTaskForJob(TaskInProgress tip, JobConf jobConf, RunningJob rjob) to launch the Task:


RunningJob rjob = localizeJob(tip);
tip.getTask().setJobFile(rjob.getLocalizedJobConf().toString());
// Localization is done. Neither rjob.jobConf nor rjob.ugi can be null
launchTaskForJob(tip, new JobConf(rjob.getJobConf()), rjob);

localizeJob() sets up the job's local working directory (workDir), copies the job jar down from HDFS if it is not already local, and unpacks it into workDir with RunJar.unJar().
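As a rough, hedged sketch of what that unpacking amounts to (not the TaskTracker's actual code; the paths below are made up, since the real ones are derived from mapred.local.dir), RunJar.unJar() simply expands a jar into a directory:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.util.RunJar;

public class UnpackJobJarSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical locations; on a real TaskTracker these live under mapred.local.dir.
    File localJobJar = new File("/tmp/jobcache/job_201308220001_0001/job.jar");
    File workDir = new File("/tmp/jobcache/job_201308220001_0001/work");
    if (!workDir.mkdirs() && !workDir.isDirectory()) {
      throw new IOException("could not create " + workDir);
    }
    // Expand the job jar so the task's classes and lib/ jars are available locally.
    RunJar.unJar(localJobJar, workDir);
  }
}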



localizeJob() also registers the task with the job's RunningJob via addTaskToJob(): if runningJobs does not yet contain a RunningJob for this job, one is created and added, and the task is then added to that RunningJob's task list. With localization done, launchTaskForJob() starts the Task, which it does by calling TaskInProgress.launchTask(RunningJob rjob):
/**
 * Kick off the task execution
 */
public synchronized void launchTask(RunningJob rjob) throws IOException {
  if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED ||
      this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN ||
      this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) {
    localizeTask(task);
    if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED) {
      this.taskStatus.setRunState(TaskStatus.State.RUNNING);
    }
    setTaskRunner(task.createRunner(TaskTracker.this, this, rjob));
    this.runner.start();
    long now = System.currentTimeMillis();
    this.taskStatus.setStartTime(now);
    this.lastProgressReport = now;
  } else {
    LOG.info("Not launching task: " + task.getTaskID() +
        " since it's state is " + this.taskStatus.getRunState());
  }
}

launchTask() first calls localizeTask(task) to localize the individual task (for example writing out its task-specific jobConf), then asks the Task for a runner with createRunner(), keeps it via setTaskRunner(), and start()s it; the TaskRunner is what will eventually spawn the java child process. createRunner() is polymorphic: Task has two subclasses, MapTask for map tasks and ReduceTask for reduce tasks, so a MapTask returns a MapTaskRunner and a ReduceTask returns a ReduceTaskRunner.
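As a loose illustration of that dispatch (a self-contained toy with made-up names, not the real Hadoop classes), the factory method keeps launchTask() from ever having to test the concrete task type:

// Toy sketch of the createRunner() factory-method idea; all names are hypothetical.
abstract class ToyTask {
  abstract ToyRunner createRunner();
}

class ToyMapTask extends ToyTask {
  ToyRunner createRunner() { return new ToyRunner("map runner"); }
}

class ToyReduceTask extends ToyTask {
  ToyRunner createRunner() { return new ToyRunner("reduce runner"); }
}

class ToyRunner {
  private final String name;
  ToyRunner(String name) { this.name = name; }
  void start() { System.out.println("starting " + name); }
}

public class CreateRunnerDemo {
  public static void main(String[] args) {
    // The caller never checks the concrete type; each task builds its own runner.
    for (ToyTask task : new ToyTask[] { new ToyMapTask(), new ToyReduceTask() }) {
      task.createRunner().start();
    }
  }
}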

TaskRunner is a thread, so TaskRunner.start() lands in TaskRunner.run(), which assembles everything the java child process needs, its working directory (workDir), a CLASSPATH that includes the unpacked job jar, the JVM arguments, and then hands the launch to the TaskTracker's JvmManager. JvmManager.launchJvm() dispatches the Task to the map-side or reduce-side JvmManagerForType, whose reapJvm() first looks for a reusable JVM: an idle JVM belonging to the same Job is reused, otherwise spawnNewJvm() creates a new JvmRunner. JvmRunner.run() calls runChild(), and runChild() finally launches the child JVM through the TaskController (DefaultTaskController or LinuxTaskController):
public void runChild(JvmEnv env) throws IOException, InterruptedException {
  int exitCode = 0;
  try {
    env.vargs.add(Integer.toString(jvmId.getId()));
    TaskRunner runner = jvmToRunningTask.get(jvmId);
    if (runner != null) {
      Task task = runner.getTask();
      // Launch the task controller to run task JVM
      String user = task.getUser();
      TaskAttemptID taskAttemptId = task.getTaskID();
      String taskAttemptIdStr = task.isTaskCleanupTask() ?
          (taskAttemptId.toString() + TaskTracker.TASK_CLEANUP_SUFFIX) :
          taskAttemptId.toString();
      exitCode = tracker.getTaskController().launchTask(user,
          jvmId.jobId.toString(), taskAttemptIdStr, env.setup,
          env.vargs, env.workDir, env.stdout.toString(),
          env.stderr.toString());
    }
  }

TaskController has two implementations, DefaultTaskController and LinuxTaskController; here is DefaultTaskController's launchTask():


/**
 * Create all of the directories for the task and launches the child jvm.
 * @param user the user name
 * @param attemptId the attempt id
 * @throws IOException
 */
@Override
public int launchTask(String user, String jobId, String attemptId,
                      List<String> setup, List<String> jvmArguments,
                      File currentWorkDirectory, String stdout,
                      String stderr) throws IOException {
  ShellCommandExecutor shExec = null;
  try {
    FileSystem localFs = FileSystem.getLocal(getConf());
    // create the attempt dirs
    new Localizer(localFs,
        getConf().getStrings(JobConf.MAPRED_LOCAL_DIR_PROPERTY)).
        initializeAttemptDirs(user, jobId, attemptId);
    // create the working-directory of the task
    if (!currentWorkDirectory.mkdir()) {
      throw new IOException("Mkdirs failed to create "
          + currentWorkDirectory.toString());
    }
    // mkdir the loglocation
    String logLocation = TaskLog.getAttemptDir(jobId, attemptId).toString();
    if (!localFs.mkdirs(new Path(logLocation))) {
      throw new IOException("Mkdirs failed to create "
          + logLocation);
    }
    // read the configuration for the job
    FileSystem rawFs = FileSystem.getLocal(getConf()).getRaw();
    long logSize = 0; //TODO MAPREDUCE-1100
    // get the JVM command line.
    String cmdLine = TaskLog.buildCommandLine(setup, jvmArguments,
        new File(stdout), new File(stderr), logSize, true);
    // write the command to a file in the
    // task specific cache directory
    // TODO copy to user dir
    Path p = new Path(allocator.getLocalPathForWrite(
        TaskTracker.getPrivateDirTaskScriptLocation(user, jobId, attemptId),
        getConf()), COMMAND_FILE);
    String commandFile = writeCommand(cmdLine, rawFs, p);
    rawFs.setPermission(p, TaskController.TASK_LAUNCH_SCRIPT_PERMISSION);
    shExec = new ShellCommandExecutor(new String[]{
        "bash", "-c", commandFile},
        currentWorkDirectory);
    shExec.execute();
  } catch (Exception e) {
    if (shExec == null) {
      return -1;
    }
    int exitCode = shExec.getExitCode();
    LOG.warn("Exit code from task is : " + exitCode);
    LOG.info("Output from DefaultTaskController's launchTask follows:");
    logOutput(shExec.getOutput());
    return exitCode;
  }
  return 0;
}

launchTask() writes the assembled JVM command line into a shell script and runs it with bash, which starts a child JVM whose entry point is Child.main(). Both map tasks and reduce tasks run inside this Child process, so the next stop is Child's main() method.

In main(), the child connects back to its TaskTracker over the umbilical RPC interface and fetches its Task with getTask(jvmId), then invokes the Task's run() method. From here on we are inside the MapReduce model proper: Map --> Shuffle --> Reduce.

Starting with MapTask, here is its run() method:
public void run(final JobConf job, final TaskUmbilicalProtocol umbilical)
    throws IOException, ClassNotFoundException, InterruptedException {
  this.umbilical = umbilical;
  // start thread that will handle communication with parent
  TaskReporter reporter = new TaskReporter(getProgress(), umbilical,
      jvmContext);
  reporter.startCommunicationThread();
  boolean useNewApi = job.getUseNewMapper();
  initialize(job, getJobID(), reporter, useNewApi);
  // check if it is a cleanupJobTask
  if (jobCleanup) {
    runJobCleanupTask(umbilical, reporter);
    return;
  }
  if (jobSetup) {
    runJobSetupTask(umbilical, reporter);
    return;
  }
  if (taskCleanup) {
    runTaskCleanupTask(umbilical, reporter);
    return;
  }
  if (useNewApi) {
    runNewMapper(job, splitMetaInfo, umbilical, reporter);
  } else {
    runOldMapper(job, splitMetaInfo, umbilical, reporter);
  }
  done(umbilical, reporter);
}

run() first starts a TaskReporter thread that reports progress back to the parent, then deals with the special task types (runJobCleanupTask, runJobSetupTask, runTaskCleanupTask) before running the user's Mapper. Because MapReduce has both an old and a new API, MapTask picks runNewMapper() or runOldMapper() accordingly; we follow the old-API path, runOldMapper():
private <INKEY,INVALUE,OUTKEY,OUTVALUE>
void runOldMapper(final JobConf job,
                  final TaskSplitIndex splitIndex,
                  final TaskUmbilicalProtocol umbilical,
                  TaskReporter reporter
                  ) throws IOException, InterruptedException,
                           ClassNotFoundException {
  InputSplit inputSplit = getSplitDetails(
      new Path(splitIndex.getSplitLocation()), splitIndex.getStartOffset());
  updateJobWithSplit(job, inputSplit);
  reporter.setInputSplit(inputSplit);
  RecordReader<INKEY,INVALUE> in = isSkipping() ?
      new SkippingRecordReader<INKEY,INVALUE>(inputSplit, umbilical, reporter) :
      new TrackedRecordReader<INKEY,INVALUE>(inputSplit, job, reporter);
  job.setBoolean("mapred.skip.on", isSkipping());

  int numReduceTasks = conf.getNumReduceTasks();
  LOG.info("numReduceTasks: " + numReduceTasks);
  MapOutputCollector collector = null;
  if (numReduceTasks > 0) {
    collector = new MapOutputBuffer(umbilical, job, reporter);
  } else {
    collector = new DirectMapOutputCollector(umbilical, job, reporter);
  }
  MapRunnable<INKEY,INVALUE,OUTKEY,OUTVALUE> runner =
      ReflectionUtils.newInstance(job.getMapRunnerClass(), job);
  try {
    runner.run(in, new OldOutputCollector(collector, conf), reporter);
    collector.flush();
  } finally {
    // close
    in.close();          // close input
    collector.close();
  }
}

runOldMapper() prepares everything the Mapper needs: it reads the task's InputSplit, wraps it in a RecordReader that will feed <key, value> records to the map function, and chooses the MapOutputCollector that will receive the Mapper's output; if the job has no Reducer the output goes straight out through a DirectMapOutputCollector, otherwise a MapOutputBuffer is used. It then instantiates the MapRunner and calls its run():
public void run(RecordReader<K1, V1> input, OutputCollector<K2, V2> output,
                Reporter reporter)
    throws IOException {
  try {
    // allocate key & value instances that are re-used for all entries
    K1 key = input.createKey();
    V1 value = input.createValue();
    while (input.next(key, value)) {
      // map pair to output
      mapper.map(key, value, output, reporter);
      if (incrProcCount) {
        reporter.incrCounter(SkipBadRecords.COUNTER_GROUP,
            SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS, 1);
      }
    }
  } finally {
    mapper.close();
  }
}

MapRunner.run() allocates one reusable key and one reusable value, reads <key, value> pairs from the InputSplit one at a time, and hands each pair to the user's Mapper.map(); the Mapper emits its own key/value pairs through the OutputCollector.
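For context, here is the user-side half of that loop: a minimal old-API Mapper, the standard word-count example rather than anything from this article, whose output.collect() calls are what end up in the collector discussed next:

import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Old-API (org.apache.hadoop.mapred) Mapper; MapRunner.run() calls map() once per record.
public class WordCountMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, IntWritable> {
  private static final IntWritable one = new IntWritable(1);
  private final Text word = new Text();

  public void map(LongWritable key, Text value,
                  OutputCollector<Text, IntWritable> output, Reporter reporter)
      throws IOException {
    StringTokenizer itr = new StringTokenizer(value.toString());
    while (itr.hasMoreTokens()) {
      word.set(itr.nextToken());
      output.collect(word, one);   // eventually lands in MapOutputBuffer.collect()
    }
  }
}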



What happens to those emitted pairs, buffering in memory, spilling to disk, and combining, is the interesting part. The OutputCollector handed to the map function wraps a MapOutputCollector, which as mentioned has two implementations, MapOutputBuffer and DirectMapOutputCollector. DirectMapOutputCollector is used when the job has no Reduce phase: the Mapper's output is written directly as the final output. MapOutputBuffer is the normal case and is where the map-side buffering and spilling live. Its collect() method receives each map output record:
public synchronized void collect(K key, V value, int partition
                                 ) throws IOException {
  reporter.progress();
  if (key.getClass() != keyClass) {
    throw new IOException("Type mismatch in key from map: expected "
                          + keyClass.getName() + ", recieved "
                          + key.getClass().getName());
  }
  if (value.getClass() != valClass) {
    throw new IOException("Type mismatch in value from map: expected "
                          + valClass.getName() + ", recieved "
                          + value.getClass().getName());
  }
  final int kvnext = (kvindex + 1) % kvoffsets.length;
  spillLock.lock();
  try {
    boolean kvfull;
    do {
      if (sortSpillException != null) {
        throw (IOException)new IOException("Spill failed"
            ).initCause(sortSpillException);
      }
      // sufficient acct space
      kvfull = kvnext == kvstart;
      final boolean kvsoftlimit = ((kvnext > kvend)
          ? kvnext - kvend > softRecordLimit
          : kvend - kvnext <= kvoffsets.length - softRecordLimit);
      if (kvstart == kvend && kvsoftlimit) {
        LOG.info("Spilling map output: record full = " + kvsoftlimit);
        startSpill();
      }
      if (kvfull) {
        try {
          while (kvstart != kvend) {
            reporter.progress();
            spillDone.await();
          }
        } catch (InterruptedException e) {
          throw (IOException)new IOException(
              "Collector interrupted while waiting for the writer"
              ).initCause(e);
        }
      }
    } while (kvfull);
  } finally {
    spillLock.unlock();
  }
  try {
    // serialize key bytes into buffer
    int keystart = bufindex;
    keySerializer.serialize(key);
    if (bufindex < keystart) {
      // wrapped the key; reset required
      bb.reset();
      keystart = 0;
    }
    // serialize value bytes into buffer
    final int valstart = bufindex;
    valSerializer.serialize(value);
    int valend = bb.markRecord();
    if (partition < 0 || partition >= partitions) {
      throw new IOException("Illegal partition for " + key + " (" +
          partition + ")");
    }
    mapOutputRecordCounter.increment(1);
    mapOutputByteCounter.increment(valend >= keystart
        ? valend - keystart
        : (bufvoid - keystart) + valend);
    // update accounting info
    int ind = kvindex * ACCTSIZE;
    kvoffsets[kvindex] = ind;
    kvindices[ind + PARTITION] = partition;
    kvindices[ind + KEYSTART] = keystart;
    kvindices[ind + VALSTART] = valstart;
    kvindex = kvnext;
  } catch (MapBufferTooSmallException e) {
    LOG.info("Record too large for in-memory buffer: " + e.getMessage());
    spillSingleRecord(key, value, partition);
    mapOutputRecordCounter.increment(1);
    return;
  }
}

Map output first goes into an in-memory buffer, 100 MB by default and configurable with io.sort.mb. When the buffer passes 80% full (io.sort.spill.percent), a spill is started and the spill thread writes the buffered records to disk. Two things happen during a spill: 1. sortAndSpill() sorts the buffered records, by partition first and then by key, using QuickSort; 2. if a combiner is configured, a CombinerRunner runs the combine function over the buffered data before it is written out, shrinking both the spill files and the data later shuffled to the reducers. (A hedged configuration sketch of these two knobs follows.)
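A hedged sketch of setting those two knobs from a job's configuration (the values are purely illustrative, not recommendations):

import org.apache.hadoop.mapred.JobConf;

public class MapSideBufferConfigSketch {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // Size of the in-memory map output buffer, in MB (default 100).
    conf.setInt("io.sort.mb", 200);
    // Fraction of the buffer that triggers a spill (default 0.80).
    conf.setFloat("io.sort.spill.percent", 0.80f);
    System.out.println("io.sort.mb = " + conf.getInt("io.sort.mb", 100));
  }
}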


The spill itself is done by sortAndSpill():

private void sortAndSpill() throws IOException, ClassNotFoundException,
                                   InterruptedException {
  // approximate the length of the output file to be the length of the
  // buffer + header lengths for the partitions
  long size = (bufend >= bufstart
      ? bufend - bufstart
      : (bufvoid - bufend) + bufstart) +
      partitions * APPROX_HEADER_LENGTH;
  FSDataOutputStream out = null;
  try {
    // create spill file
    final SpillRecord spillRec = new SpillRecord(partitions);
    final Path filename = mapOutputFile.getSpillFileForWrite(numSpills, size);
    out = rfs.create(filename);
    final int endPosition = (kvend > kvstart)
        ? kvend
        : kvoffsets.length + kvend;
    sorter.sort(MapOutputBuffer.this, kvstart, endPosition, reporter);
    int spindex = kvstart;
    IndexRecord rec = new IndexRecord();
    InMemValBytes value = new InMemValBytes();
    for (int i = 0; i < partitions; ++i) {
      IFile.Writer<K, V> writer = null;
      try {
        long segmentStart = out.getPos();
        writer = new Writer<K, V>(job, out, keyClass, valClass, codec,
                                  spilledRecordsCounter);
        if (combinerRunner == null) {
          // spill directly
          DataInputBuffer key = new DataInputBuffer();
          while (spindex < endPosition &&
              kvindices[kvoffsets[spindex % kvoffsets.length]
                        + PARTITION] == i) {
            final int kvoff = kvoffsets[spindex % kvoffsets.length];
            getVBytesForOffset(kvoff, value);
            key.reset(kvbuffer, kvindices[kvoff + KEYSTART],
                      (kvindices[kvoff + VALSTART] -
                       kvindices[kvoff + KEYSTART]));
            writer.append(key, value);
            ++spindex;
          }
        } else {
          int spstart = spindex;
          while (spindex < endPosition &&
              kvindices[kvoffsets[spindex % kvoffsets.length]
                        + PARTITION] == i) {
            ++spindex;
          }
          // Note: we would like to avoid the combiner if we've fewer
          // than some threshold of records for a partition
          if (spstart != spindex) {
            combineCollector.setWriter(writer);
            RawKeyValueIterator kvIter =
                new MRResultIterator(spstart, spindex);
            combinerRunner.combine(kvIter, combineCollector);
          }
        }
        // close the writer
        writer.close();
        // record offsets
        rec.startOffset = segmentStart;
        rec.rawLength = writer.getRawLength();
        rec.partLength = writer.getCompressedLength();
        spillRec.putIndex(rec, i);
        writer = null;
      } finally {
        if (null != writer) writer.close();
      }
    }
    if (totalIndexCacheMemory >= INDEX_CACHE_MEMORY_LIMIT) {
      // create spill index file
      Path indexFilename = mapOutputFile.getSpillIndexFileForWrite(
          numSpills, partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH);
      spillRec.writeToFile(indexFilename, job);
    } else {
      indexCacheList.add(spillRec);
      totalIndexCacheMemory +=
          spillRec.size() * MAP_OUTPUT_INDEX_RECORD_LENGTH;
    }
    LOG.info("Finished spill " + numSpills);
    ++numSpills;
  } finally {
    if (out != null) out.close();
  }
}

Once the map function has consumed all of its input, MapOutputBuffer.flush() runs a final sortAndSpill() on whatever is left in the buffer and then calls mergeParts() to merge all the spill files into a single sorted, partitioned map output file. Two points are worth noting: 1. the combiner may be applied again while the spills are merged; 2. setting mapred.compress.map.output to true compresses the map output, reducing the amount of data that has to move from the Map side to the Reduce side. (A hedged sketch of enabling this follows.)
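A hedged sketch of turning map output compression on through the old API (the codec choice here is only an example):

import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.JobConf;

public class MapOutputCompressionSketch {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // Equivalent to setting mapred.compress.map.output=true.
    conf.setCompressMapOutput(true);
    // Compress intermediate map output with gzip; any available CompressionCodec works.
    conf.setMapOutputCompressorClass(GzipCodec.class);
    System.out.println(conf.get("mapred.compress.map.output"));
  }
}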
That wraps up the map side; now for ReduceTask. ReduceTask.run() begins much like MapTask.run(), calling initialize() and handling runJobCleanupTask(), runJobSetupTask() and runTaskCleanupTask(), and then works through three phases: Copy, Sort and Reduce.

public void run(JobConf job, final TaskUmbilicalProtocol umbilical)
    throws IOException, InterruptedException, ClassNotFoundException {
  this.umbilical = umbilical;
  job.setBoolean("mapred.skip.on", isSkipping());

  if (isMapOrReduce()) {
    copyPhase = getProgress().addPhase("copy");
    sortPhase = getProgress().addPhase("sort");
    reducePhase = getProgress().addPhase("reduce");
  }
  // start thread that will handle communication with parent
  TaskReporter reporter = new TaskReporter(getProgress(), umbilical,
      jvmContext);
  reporter.startCommunicationThread();
  boolean useNewApi = job.getUseNewReducer();
  initialize(job, getJobID(), reporter, useNewApi);
  // check if it is a cleanupJobTask
  if (jobCleanup) {
    runJobCleanupTask(umbilical, reporter);
    return;
  }
  if (jobSetup) {
    runJobSetupTask(umbilical, reporter);
    return;
  }
  if (taskCleanup) {
    runTaskCleanupTask(umbilical, reporter);
    return;
  }

  // Initialize the codec
  codec = initCodec();

  boolean isLocal = "local".equals(job.get("mapred.job.tracker", "local"));
  if (!isLocal) {
    reduceCopier = new ReduceCopier(umbilical, job, reporter);
    if (!reduceCopier.fetchOutputs()) {
      if (reduceCopier.mergeThrowable instanceof FSError) {
        throw (FSError)reduceCopier.mergeThrowable;
      }
      throw new IOException("Task: " + getTaskID() +
          " - The reduce copier failed", reduceCopier.mergeThrowable);
    }
  }
  copyPhase.complete();                     // copy is already complete
  setPhase(TaskStatus.Phase.SORT);
  statusUpdate(umbilical);

  final FileSystem rfs = FileSystem.getLocal(job).getRaw();
  RawKeyValueIterator rIter = isLocal
      ? Merger.merge(job, rfs, job.getMapOutputKeyClass(),
          job.getMapOutputValueClass(), codec, getMapFiles(rfs, true),
          !conf.getKeepFailedTaskFiles(), job.getInt("io.sort.factor", 100),
          new Path(getTaskID().toString()), job.getOutputKeyComparator(),
          reporter, spilledRecordsCounter, null)
      : reduceCopier.createKVIterator(job, rfs, reporter);

  // free up the data structures
  mapOutputFilesOnDisk.clear();

  sortPhase.complete();                     // sort is complete
  setPhase(TaskStatus.Phase.REDUCE);
  statusUpdate(umbilical);

  Class keyClass = job.getMapOutputKeyClass();
  Class valueClass = job.getMapOutputValueClass();
  RawComparator comparator = job.getOutputValueGroupingComparator();

  if (useNewApi) {
    runNewReducer(job, umbilical, reporter, rIter, comparator,
                  keyClass, valueClass);
  } else {
    runOldReducer(job, umbilical, reporter, rIter, comparator,
                  keyClass, valueClass);
  }
  done(umbilical, reporter);
}

The Reduce phase itself is in runOldReducer() (again following the old API):


private <INKEY,INVALUE,OUTKEY,OUTVALUE>
void runOldReducer(JobConf job,
                   TaskUmbilicalProtocol umbilical,
                   final TaskReporter reporter,
                   RawKeyValueIterator rIter,
                   RawComparator<INKEY> comparator,
                   Class<INKEY> keyClass,
                   Class<INVALUE> valueClass) throws IOException {
  Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE> reducer =
      ReflectionUtils.newInstance(job.getReducerClass(), job);
  // make output collector
  String finalName = getOutputName(getPartition());
  final RecordWriter<OUTKEY, OUTVALUE> out = new
      OldTrackingRecordWriter<OUTKEY, OUTVALUE>(
          reduceOutputCounter, job, reporter, finalName);
  OutputCollector<OUTKEY,OUTVALUE> collector =
      new OutputCollector<OUTKEY,OUTVALUE>() {
        public void collect(OUTKEY key, OUTVALUE value)
            throws IOException {
          out.write(key, value);
          // indicate that progress update needs to be sent
          reporter.progress();
        }
      };
  // apply reduce function
  try {
    // increment processed counter only if skipping feature is enabled
    boolean incrProcCount = SkipBadRecords.getReducerMaxSkipGroups(job) > 0 &&
        SkipBadRecords.getAutoIncrReducerProcCount(job);

    ReduceValuesIterator<INKEY,INVALUE> values = isSkipping()
        ? new SkippingReduceValuesIterator<INKEY,INVALUE>(rIter,
            comparator, keyClass, valueClass, job, reporter, umbilical)
        : new ReduceValuesIterator<INKEY,INVALUE>(rIter,
            job.getOutputValueGroupingComparator(), keyClass, valueClass,
            job, reporter);
    values.informReduceProgress();
    while (values.more()) {
      reduceInputKeyCounter.increment(1);
      reducer.reduce(values.getKey(), values, collector, reporter);
      if (incrProcCount) {
        reporter.incrCounter(SkipBadRecords.COUNTER_GROUP,
            SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS, 1);
      }
      values.nextKey();
      values.informReduceProgress();
    }

    // Clean up: repeated in catch block below
    reducer.close();
    out.close(reporter);
    // End of clean up.
  } catch (IOException ioe) {
    try {
      reducer.close();
    } catch (IOException ignored) {}
    try {
      out.close(reporter);
    } catch (IOException ignored) {}
    throw ioe;
  }
}

runOldReducer() builds an OutputCollector much as MapTask did, but here the OutputCollector simply wraps a RecordWriter: every collect() becomes a write(), and the RecordWriter writes the final results out to HDFS. ReduceTask takes the key class, value class and grouping comparator from the job configuration, presents the merged map outputs to the user's Reducer as a key plus an Iterator over its values, and with that the Reduce phase is done. This completes the Map --> Shuffle --> Reduce model: Map runs in MapTask, Reduce runs in ReduceTask, and the Shuffle spans the two, with the map side partitioning, sorting and spilling its output and the reduce side copying and merging it. The Shuffle is usually where much of a job's time goes.
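Again for context, a minimal old-API Reducer, the word-count example rather than anything specific to this article; its output.collect() goes through the RecordWriter described above:

import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

// Old-API Reducer; runOldReducer() calls reduce() once per key with an Iterator of values.
public class WordCountReducer extends MapReduceBase
    implements Reducer<Text, IntWritable, Text, IntWritable> {
  public void reduce(Text key, Iterator<IntWritable> values,
                     OutputCollector<Text, IntWritable> output, Reporter reporter)
      throws IOException {
    int sum = 0;
    while (values.hasNext()) {
      sum += values.next().get();
    }
    output.collect(key, new IntWritable(sum));   // written out via the RecordWriter
  }
}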


We have now seen how a task handed out by the JobTracker actually runs. The other side of the story, how a job gets there in the first place, begins with the JobClient submitting the job to the JobTracker; on the user side that path usually starts from JobClient.runJob().
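A minimal driver, again just the standard word-count example rather than anything from this article, showing the runJob() entry point that sets the whole pipeline in motion (it reuses the hypothetical WordCountMapper and WordCountReducer sketched earlier):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class WordCountDriver {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(WordCountDriver.class);
    conf.setJobName("wordcount");
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(IntWritable.class);
    conf.setMapperClass(WordCountMapper.class);
    conf.setCombinerClass(WordCountReducer.class);
    conf.setReducerClass(WordCountReducer.class);
    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
    // Submits the job to the JobTracker and blocks until it completes.
    JobClient.runJob(conf);
  }
}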

