public class JobProgressListener extends Object implements SparkListener, Logging
All access to the data structures in this class must be synchronized on the class, since the UI thread and the EventBus loop may otherwise be reading and updating the internal data structures concurrently.
| Constructor and Description |
|---|
JobProgressListener(SparkConf conf) |
| Modifier and Type | Method and Description |
|---|---|
scala.collection.mutable.HashMap<Object,UIData.JobUIData> |
activeJobs() |
scala.collection.mutable.HashMap<Object,StageInfo> |
activeStages() |
scala.collection.Seq<BlockManagerId> |
blockManagerIds() |
scala.collection.mutable.ListBuffer<UIData.JobUIData> |
completedJobs() |
scala.collection.mutable.ListBuffer<StageInfo> |
completedStages() |
static String |
DEFAULT_POOL_NAME() |
static int |
DEFAULT_RETAINED_JOBS() |
static int |
DEFAULT_RETAINED_STAGES() |
scala.collection.mutable.HashMap<String,BlockManagerId> |
executorIdToBlockManagerId() |
scala.collection.mutable.ListBuffer<UIData.JobUIData> |
failedJobs() |
scala.collection.mutable.ListBuffer<StageInfo> |
failedStages() |
scala.collection.immutable.Map<String,Object> |
getSizesOfActiveStateTrackingCollections() |
scala.collection.immutable.Map<String,Object> |
getSizesOfHardSizeLimitedCollections() |
scala.collection.immutable.Map<String,Object> |
getSizesOfSoftSizeLimitedCollections() |
scala.collection.mutable.HashMap<Object,UIData.JobUIData> |
jobIdToData() |
int |
numCompletedStages() |
int |
numFailedStages() |
void |
onBlockManagerAdded(SparkListenerBlockManagerAdded blockManagerAdded)
Called when a new block manager has joined.
|
void |
onBlockManagerRemoved(SparkListenerBlockManagerRemoved blockManagerRemoved)
Called when an existing block manager has been removed.
|
void |
onEnvironmentUpdate(SparkListenerEnvironmentUpdate environmentUpdate)
Called when environment properties have been updated.
|
void |
onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate executorMetricsUpdate)
Called when the driver receives task metrics from an executor in a heartbeat.
|
void |
onJobEnd(SparkListenerJobEnd jobEnd)
Called when a job ends.
|
void |
onJobStart(SparkListenerJobStart jobStart)
Called when a job starts.
|
void |
onStageCompleted(SparkListenerStageCompleted stageCompleted)
Called when a stage completes successfully or fails, with information on the completed stage.
|
void |
onStageSubmitted(SparkListenerStageSubmitted stageSubmitted)
For FIFO scheduling, all stages are contained in the "default" pool, but the "default" pool here is meaningless.
|
void |
onTaskEnd(SparkListenerTaskEnd taskEnd)
Called when a task ends.
|
void |
onTaskGettingResult(SparkListenerTaskGettingResult taskGettingResult)
Called when a task begins remotely fetching its result (will not be called for tasks that do
not need to fetch the result remotely).
|
void |
onTaskStart(SparkListenerTaskStart taskStart)
Called when a task starts.
|
scala.collection.mutable.HashMap<Object,StageInfo> |
pendingStages() |
scala.collection.mutable.HashMap<String,scala.collection.mutable.HashMap<Object,StageInfo>> |
poolToActiveStages() |
int |
retainedJobs() |
int |
retainedStages() |
scala.Option<scala.Enumeration.Value> |
schedulingMode() |
scala.collection.mutable.ListBuffer<StageInfo> |
skippedStages() |
scala.collection.mutable.HashMap<Object,scala.collection.mutable.HashSet<Object>> |
stageIdToActiveJobIds() |
scala.collection.mutable.HashMap<scala.Tuple2<Object,Object>,UIData.StageUIData> |
stageIdToData() |
scala.collection.mutable.HashMap<Object,StageInfo> |
stageIdToInfo() |
void |
updateAggregateMetrics(UIData.StageUIData stageData,
String execId,
org.apache.spark.executor.TaskMetrics taskMetrics,
scala.Option<org.apache.spark.executor.TaskMetrics> oldMetrics)
Upon receiving new metrics for a task, updates the per-stage and per-executor-per-stage
aggregate metrics by calculating deltas between the currently recorded metrics and the new
metrics.
|
Methods inherited from class java.lang.Object:
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Methods inherited from interface SparkListener:
onApplicationEnd, onApplicationStart, onExecutorAdded, onExecutorRemoved, onUnpersistRDD

Methods inherited from interface Logging:
initializeIfNecessary, initializeLogging, isTraceEnabled, log_, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning

public JobProgressListener(SparkConf conf)
public static String DEFAULT_POOL_NAME()
public static int DEFAULT_RETAINED_STAGES()
public static int DEFAULT_RETAINED_JOBS()
public scala.collection.mutable.HashMap<Object,UIData.JobUIData> activeJobs()
public scala.collection.mutable.ListBuffer<UIData.JobUIData> completedJobs()
public scala.collection.mutable.ListBuffer<UIData.JobUIData> failedJobs()
public scala.collection.mutable.HashMap<Object,UIData.JobUIData> jobIdToData()
public scala.collection.mutable.HashMap<Object,StageInfo> pendingStages()
public scala.collection.mutable.HashMap<Object,StageInfo> activeStages()
public scala.collection.mutable.ListBuffer<StageInfo> completedStages()
public scala.collection.mutable.ListBuffer<StageInfo> skippedStages()
public scala.collection.mutable.ListBuffer<StageInfo> failedStages()
public scala.collection.mutable.HashMap<scala.Tuple2<Object,Object>,UIData.StageUIData> stageIdToData()
public scala.collection.mutable.HashMap<Object,StageInfo> stageIdToInfo()
public scala.collection.mutable.HashMap<Object,scala.collection.mutable.HashSet<Object>> stageIdToActiveJobIds()
public scala.collection.mutable.HashMap<String,scala.collection.mutable.HashMap<Object,StageInfo>> poolToActiveStages()
public int numCompletedStages()
public int numFailedStages()
public scala.collection.mutable.HashMap<String,BlockManagerId> executorIdToBlockManagerId()
public scala.collection.Seq<BlockManagerId> blockManagerIds()
public scala.Option<scala.Enumeration.Value> schedulingMode()
public int retainedStages()
public int retainedJobs()
public scala.collection.immutable.Map<String,Object> getSizesOfActiveStateTrackingCollections()
public scala.collection.immutable.Map<String,Object> getSizesOfHardSizeLimitedCollections()
public scala.collection.immutable.Map<String,Object> getSizesOfSoftSizeLimitedCollections()
public void onJobStart(SparkListenerJobStart jobStart)
Specified by: onJobStart in interface SparkListener

public void onJobEnd(SparkListenerJobEnd jobEnd)
Specified by: onJobEnd in interface SparkListener

public void onStageCompleted(SparkListenerStageCompleted stageCompleted)
Specified by: onStageCompleted in interface SparkListener

public void onStageSubmitted(SparkListenerStageSubmitted stageSubmitted)
Specified by: onStageSubmitted in interface SparkListener

public void onTaskStart(SparkListenerTaskStart taskStart)
Specified by: onTaskStart in interface SparkListener

public void onTaskGettingResult(SparkListenerTaskGettingResult taskGettingResult)
Specified by: onTaskGettingResult in interface SparkListener

public void onTaskEnd(SparkListenerTaskEnd taskEnd)
Specified by: onTaskEnd in interface SparkListener

public void updateAggregateMetrics(UIData.StageUIData stageData, String execId, org.apache.spark.executor.TaskMetrics taskMetrics, scala.Option<org.apache.spark.executor.TaskMetrics> oldMetrics)

public void onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate executorMetricsUpdate)
Specified by: onExecutorMetricsUpdate in interface SparkListener

public void onEnvironmentUpdate(SparkListenerEnvironmentUpdate environmentUpdate)
Specified by: onEnvironmentUpdate in interface SparkListener

public void onBlockManagerAdded(SparkListenerBlockManagerAdded blockManagerAdded)
Specified by: onBlockManagerAdded in interface SparkListener

public void onBlockManagerRemoved(SparkListenerBlockManagerRemoved blockManagerRemoved)
Specified by: onBlockManagerRemoved in interface SparkListener