
An Example Analysis of hadoop-ID

Published: 2025-12-02  Author: 千家信息网 editors

In this article I would like to share an example analysis of Hadoop's ID classes. Most readers are probably not yet familiar with this topic, so the article is offered as a reference; I hope you find it rewarding. Let's take a look together.

Let us start analysing the internal workings of Hadoop MapReduce. A user submits a Job to Hadoop, and the job executes under the control of the JobTracker object. The Job is decomposed into Tasks, which are distributed across the cluster and run under the control of TaskTrackers. Tasks come in two kinds, MapTask and ReduceTask, and they are where the Map and Reduce operations of MapReduce actually execute. This division of work closely mirrors the split between NameNode and DataNode in HDFS: the JobTracker corresponds to the NameNode, and the TaskTrackers correspond to the DataNodes. The JobTracker, the TaskTrackers and the MapReduce client communicate with each other via RPC; see the HDFS part of this analysis for details.
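To make the client side of this picture concrete, here is a minimal sketch of submitting a job through the classic org.apache.hadoop.mapred API that this analysis is based on. The class name, job name, paths and the commented-out MyMapper/MyReducer are hypothetical placeholders, not code from Hadoop or from this article:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class SubmitJobSketch {
  public static void main(String[] args) throws Exception {
    // JobConf describes the job; JobClient hands it to the JobTracker over RPC.
    JobConf conf = new JobConf(SubmitJobSketch.class);
    conf.setJobName("id-demo");                 // hypothetical job name
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(IntWritable.class);
    // MyMapper / MyReducer would be the user's own classes:
    // conf.setMapperClass(MyMapper.class);
    // conf.setReducerClass(MyReducer.class);
    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
    // runJob submits the job and polls until it completes; the JobTracker
    // splits it into MapTasks and ReduceTasks that run under TaskTrackers.
    JobClient.runJob(conf);
  }
}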

Let us first look at some helper classes, starting with the ID-related ones. According to the Javadoc, ID is the common superclass, and the inheritance tree looks like this:

ID
├── JobID
├── TaskID
└── TaskAttemptID


// Licensed to the Apache Software Foundation (ASF) under one ...
package org.apache.hadoop.mapreduce;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

/**
 * A general identifier, which internally stores the id
 * as an integer. This is the super class of {@link JobID},
 * {@link TaskID} and {@link TaskAttemptID}.
 *
 * @see JobID
 * @see TaskID
 * @see TaskAttemptID
 */
public abstract class ID implements WritableComparable<ID> {
  protected static final char SEPARATOR = '_';
  protected int id;

  /** constructs an ID object from the given int */
  public ID(int id) {
    this.id = id;
  }

  protected ID() {
  }

  /** returns the int which represents the identifier */
  public int getId() {
    return id;
  }

  @Override
  public String toString() {
    return String.valueOf(id);
  }

  @Override
  public int hashCode() {
    return Integer.valueOf(id).hashCode();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o)
      return true;
    if (o == null)
      return false;
    if (o.getClass() == this.getClass()) {
      ID that = (ID) o;
      return this.id == that.id;
    } else {
      return false;
    }
  }

  /** Compare IDs by associated numbers */
  public int compareTo(ID that) {
    return this.id - that.id;
  }

  public void readFields(DataInput in) throws IOException {
    this.id = in.readInt();
  }

  public void write(DataOutput out) throws IOException {
    out.writeInt(id);
  }
}
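Since ID is abstract, here is a minimal sketch (not Hadoop code) using a hypothetical subclass DemoID to exercise the Writable round trip and the number-based comparison described above:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

// Toy subclass for illustration only; ID itself cannot be instantiated.
class DemoID extends org.apache.hadoop.mapreduce.ID {
  public DemoID(int id) { super(id); }
  public DemoID() { }
}

public class IdRoundTrip {
  public static void main(String[] args) throws Exception {
    DemoID original = new DemoID(42);

    // write() serializes the identifier as a single int
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // readFields() restores it from those same four bytes
    DemoID copy = new DemoID();
    copy.readFields(new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy.getId());             // 42
    System.out.println(original.equals(copy));    // true: same class, same id
    System.out.println(original.compareTo(copy)); // 0: ordered by the numeric id
  }
}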
The JobID class follows:

// Licensed to the Apache Software Foundation (ASF) under one ...
package org.apache.hadoop.mapreduce;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.text.NumberFormat;

import org.apache.hadoop.io.Text;

/**
 * JobID represents the immutable and unique identifier for
 * the job. JobID consists of two parts. First part
 * represents the jobtracker identifier, so that jobID to jobtracker map
 * is defined. For cluster setup this string is the jobtracker
 * start time, for local setting, it is "local".
 * Second part of the JobID is the job number.
 *
 * An example JobID is:
 * job_200707121733_0003, which represents the third job
 * running at the jobtracker started at 200707121733.
 *
 * Applications should never construct or parse JobID strings, but rather
 * use appropriate constructors or {@link #forName(String)} method.
 *
 * @see TaskID
 * @see TaskAttemptID
 * @see org.apache.hadoop.mapred.JobTracker#getNewJobId()
 * @see org.apache.hadoop.mapred.JobTracker#getStartTime()
 */
public class JobID extends org.apache.hadoop.mapred.ID
                   implements Comparable<ID> {
  protected static final String JOB = "job";
  private final Text jtIdentifier;

  protected static final NumberFormat idFormat = NumberFormat.getInstance();
  static {
    idFormat.setGroupingUsed(false);
    idFormat.setMinimumIntegerDigits(4);
  }

  /**
   * Constructs a JobID object
   * @param jtIdentifier jobTracker identifier
   * @param id job number
   */
  public JobID(String jtIdentifier, int id) {
    super(id);
    this.jtIdentifier = new Text(jtIdentifier);
  }

  public JobID() {
    jtIdentifier = new Text();
  }

  public String getJtIdentifier() {
    return jtIdentifier.toString();
  }

  @Override
  public boolean equals(Object o) {
    if (!super.equals(o))
      return false;
    JobID that = (JobID) o;
    return this.jtIdentifier.equals(that.jtIdentifier);
  }

  /** Compare JobIds by first jtIdentifiers, then by job numbers */
  @Override
  public int compareTo(ID o) {
    JobID that = (JobID) o;
    int jtComp = this.jtIdentifier.compareTo(that.jtIdentifier);
    if (jtComp == 0) {
      return this.id - that.id;
    } else {
      return jtComp;
    }
  }

  /**
   * Add the stuff after the "job" prefix to the given builder. This is useful,
   * because the sub-ids use this substring at the start of their string.
   * @param builder the builder to append to
   * @return the builder that was passed in
   */
  public StringBuilder appendTo(StringBuilder builder) {
    builder.append(SEPARATOR);
    builder.append(jtIdentifier);
    builder.append(SEPARATOR);
    builder.append(idFormat.format(id));
    return builder;
  }

  @Override
  public int hashCode() {
    return jtIdentifier.hashCode() + id;
  }

  @Override
  public String toString() {
    return appendTo(new StringBuilder(JOB)).toString();
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    super.readFields(in);
    this.jtIdentifier.readFields(in);
  }

  @Override
  public void write(DataOutput out) throws IOException {
    super.write(out);
    jtIdentifier.write(out);
  }

  /** Construct a JobId object from given string
   * @return constructed JobId object or null if the given String is null
   * @throws IllegalArgumentException if the given string is malformed
   */
  public static JobID forName(String str) throws IllegalArgumentException {
    if (str == null)
      return null;
    try {
      String[] parts = str.split("_");
      if (parts.length == 3) {
        if (parts[0].equals(JOB)) {
          return new org.apache.hadoop.mapred.JobID(parts[1],
                                                    Integer.parseInt(parts[2]));
        }
      }
    } catch (Exception ex) {
      // fall below
    }
    throw new IllegalArgumentException("JobId string : " + str
        + " is not properly formed");
  }
}
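Building on the example from the Javadoc above (job_200707121733_0003, the third job of the jobtracker started at 200707121733), here is a small usage sketch; the demo class name is only for illustration:

import org.apache.hadoop.mapreduce.JobID;

public class JobIdDemo {
  public static void main(String[] args) {
    // jtIdentifier = jobtracker start time, id = job number 3
    JobID id = new JobID("200707121733", 3);

    // idFormat pads the job number to at least four digits,
    // so this prints: job_200707121733_0003
    System.out.println(id);

    // forName() parses the string form back; applications should use it
    // rather than splitting the string themselves.
    JobID parsed = JobID.forName("job_200707121733_0003");
    System.out.println(parsed.getJtIdentifier()); // 200707121733
    System.out.println(parsed.getId());           // 3
  }
}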

以上是"hadoop-ID的示例分析"这篇文章的所有内容,感谢各位的阅读!相信大家都有了一定的了解,希望分享的内容对大家有所帮助,如果还想学习更多知识,欢迎关注行业资讯频道!
