public class InsertIntoHiveTable extends org.apache.spark.sql.execution.SparkPlan implements org.apache.spark.sql.execution.UnaryNode, HiveInspectors, scala.Product, scala.Serializable
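InsertIntoHiveTable is the physical operator that Spark SQL's Hive planner emits for INSERT INTO / INSERT OVERWRITE statements against a metastore table; user code does not normally construct it directly. A minimal sketch of how the operator typically arises, assuming a Spark 1.x HiveContext and pre-existing Hive tables named `logs` (partitioned by `ds`) and `staging` (both hypothetical names):

```scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext

val sc = new SparkContext(new SparkConf().setAppName("InsertIntoHiveTableExample"))
val hiveContext = new HiveContext(sc)

// A static-partition insert: the planner should translate this into an
// InsertIntoHiveTable node with partition = Map("ds" -> Some("2015-01-01"))
// and overwrite = true.
val insert = hiveContext.sql(
  "INSERT OVERWRITE TABLE logs PARTITION (ds = '2015-01-01') SELECT * FROM staging")

// Inspecting the physical plan (a developer API) should show the operator.
println(insert.queryExecution.executedPlan)
```

Note that, somewhat confusingly, the operator's own sc() accessor returns the HiveContext, not the SparkContext.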
Nested classes/interfaces inherited from interface org.apache.spark.sql.hive.HiveInspectors: HiveInspectors.typeInfoConversions

| Constructor and Description |
|---|
| InsertIntoHiveTable(MetastoreRelation table, scala.collection.immutable.Map<String,scala.Option<String>> partition, org.apache.spark.sql.execution.SparkPlan child, boolean overwrite) |
| Modifier and Type | Method and Description |
|---|---|
| org.apache.spark.sql.execution.SparkPlan | child() |
| RDD<org.apache.spark.sql.Row> | execute() |
| org.apache.spark.sql.Row[] | executeCollect() |
| scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> | output() |
| Class<? extends org.apache.hadoop.io.Writable> | outputClass() |
| boolean | overwrite() |
| scala.collection.immutable.Map<String,scala.Option<String>> | partition() |
| void | saveAsHiveFile(RDD<org.apache.spark.sql.Row> rdd, Class<?> valueClass, ShimFileSinkDesc fileSinkConf, SerializableWritable<org.apache.hadoop.mapred.JobConf> conf, SparkHiveWriterContainer writerContainer) |
| HiveContext | sc() |
| MetastoreRelation | table() |
Methods inherited from class org.apache.spark.sql.execution.SparkPlan: codegenEnabled, executeTake, isTraceEnabled, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning, makeCopy, newMutableProjection, newOrdering, newPredicate, newProjection, org$apache$spark$Logging$$log__$eq, org$apache$spark$Logging$$log_, outputPartitioning, requiredChildDistribution, sparkContext, sqlContext

Methods inherited from class org.apache.spark.sql.catalyst.plans.QueryPlan: expressions, inputSet, missingInput, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionDown$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionUp$1, outputSet, printSchema, references, schema, schemaString, simpleString, statePrefix, transformAllExpressions, transformExpressions, transformExpressionsDown, transformExpressionsUp

Methods inherited from class org.apache.spark.sql.catalyst.trees.TreeNode: apply, argString, asCode, children, collect, fastEquals, flatMap, foreach, foreachUp, generateTreeString, getNodeNumbered, map, mapChildren, nodeName, numberedTreeString, origin, otherCopyArgs, stringArgs, toString, transform, transformChildrenDown, transformChildrenUp, transformDown, transformUp, treeString, withNewChildren

Methods inherited from interface org.apache.spark.sql.hive.HiveInspectors: inspectorToDataType, javaClassToDataType, toInspector, toInspector, unwrap, wrap, wrap, wrap, wrapperFor

Methods inherited from interface scala.Product: productArity, productElement, productIterator, productPrefix

Methods inherited from interface org.apache.spark.Logging: initializeIfNecessary, initializeLogging, log_

public InsertIntoHiveTable(MetastoreRelation table, scala.collection.immutable.Map<String,scala.Option<String>> partition, org.apache.spark.sql.execution.SparkPlan child, boolean overwrite)
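A note on the constructor arguments, hedged against the Spark 1.x source: `partition` maps each partition column to `Some(value)` for a static partition value or `None` for a dynamically resolved one, and `overwrite` distinguishes INSERT OVERWRITE from plain INSERT. A sketch of the map for a hypothetical table partitioned by `(ds, hr)`:

```scala
// Hypothetical spec for PARTITION (ds = '2015-01-01', hr):
// `ds` is pinned to a static value, `hr` is resolved dynamically per row.
// (Assumption: Some(...) = static partition value, None = dynamic.)
val partition: Map[String, Option[String]] =
  Map("ds" -> Some("2015-01-01"), "hr" -> None)
```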
public MetastoreRelation table()
public scala.collection.immutable.Map<String,scala.Option<String>> partition()
public org.apache.spark.sql.execution.SparkPlan child()
Specified by: child in interface org.apache.spark.sql.catalyst.trees.UnaryNode<org.apache.spark.sql.execution.SparkPlan>

public boolean overwrite()
public HiveContext sc()
public Class<? extends org.apache.hadoop.io.Writable> outputClass()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output()
Specified by: output in class org.apache.spark.sql.catalyst.plans.QueryPlan<org.apache.spark.sql.execution.SparkPlan>

public void saveAsHiveFile(RDD<org.apache.spark.sql.Row> rdd, Class<?> valueClass, ShimFileSinkDesc fileSinkConf, SerializableWritable<org.apache.hadoop.mapred.JobConf> conf, SparkHiveWriterContainer writerContainer)
public org.apache.spark.sql.Row[] executeCollect()
Overrides: executeCollect in class org.apache.spark.sql.execution.SparkPlan

public RDD<org.apache.spark.sql.Row> execute()
Specified by: execute in class org.apache.spark.sql.execution.SparkPlan
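In the Spark 1.x source this operator performs the write as a side effect; both execute() and executeCollect() return an empty result once the insert has run. A hedged sketch of triggering it by hand via TreeNode.collect (ordinarily the SchemaRDD/DataFrame machinery does this; `hiveContext`, `logs`, and `staging` are the hypothetical names from the class-level example above):

```scala
import org.apache.spark.sql.hive.execution.InsertIntoHiveTable

// Locate the operator inside an INSERT query's physical plan, then run it.
val insertPlan = hiveContext
  .sql("INSERT INTO TABLE logs SELECT * FROM staging")
  .queryExecution.executedPlan

insertPlan.collect { case node: InsertIntoHiveTable => node }.foreach { node =>
  // execute() writes the rows as a side effect; the RDD it returns
  // is expected to be empty for this write-only operator.
  val rows = node.execute().collect()
  println(s"rows returned by the insert operator: ${rows.length}") // expected 0
}
```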