<s>[INST] <<SYS>> Below is an instruction that describes a task. Write a response that appropriately completes the request. <</SYS>> Write SparkSession class. [/INST] /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */package org.apache.spark.sqlimport java.io.Closeableimport java.util.{ServiceLoader, UUID}import java.util.concurrent.TimeUnit._import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}import scala.collection.JavaConverters._import scala.reflect.runtime.universe.TypeTagimport scala.util.control.NonFatalimport org.apache.spark.{SPARK_VERSION, SparkConf, SparkContext, TaskContext}import org.apache.spark.annotation.{DeveloperApi, Experimental, Stable, Unstable}import org.apache.spark.api.java.JavaRDDimport org.apache.spark.internal.Loggingimport org.apache.spark.internal.config.{ConfigEntry, EXECUTOR_ALLOW_SPARK_CONTEXT}import org.apache.spark.rdd.RDDimport org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}import org.apache.spark.sql.catalog.Catalogimport org.apache.spark.sql.catalyst._import org.apache.spark.sql.catalyst.analysis.{NameParameterizedQuery, PosParameterizedQuery, UnresolvedRelation}import org.apache.spark.sql.catalyst.encoders._import org.apache.spark.sql.catalyst.expressions.AttributeReferenceimport org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, Range}import org.apache.spark.sql.catalyst.types.DataTypeUtils.toAttributesimport org.apache.spark.sql.catalyst.util.CharVarcharUtilsimport org.apache.spark.sql.connector.ExternalCommandRunnerimport org.apache.spark.sql.errors.QueryCompilationErrorsimport org.apache.spark.sql.execution._import org.apache.spark.sql.execution.command.ExternalCommandExecutorimport org.apache.spark.sql.execution.datasources.{DataSource, LogicalRelation}import org.apache.spark.sql.functions.litimport org.apache.spark.sql.internal._import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATIONimport org.apache.spark.sql.sources.BaseRelationimport org.apache.spark.sql.streaming._import org.apache.spark.sql.types.{DataType, StructType}import org.apache.spark.sql.util.ExecutionListenerManagerimport org.apache.spark.util.{CallSite, Utils}/** * The entry point to programming Spark with the Dataset and DataFrame API. * * In environments that this has been created upfront (e.g. REPL, notebooks), use the builder * to get an existing session: * * {{{ * SparkSession.builder().getOrCreate() * }}} * * The builder can also be used to create a new session: * * {{{ * SparkSession.builder * .master("local") * .appName("Word Count") * .config("spark.some.config.option", "some-value") * .getOrCreate() * }}} * * @param sparkContext The Spark context associated with this Spark session. * @param existingSharedState If supplied, use the existing shared state * instead of creating a new one. 
* @param parentSessionState If supplied, inherit all session state (i.e. temporary * views, SQL config, UDFs etc) from parent. */@Stableclass SparkSession private( @transient val sparkContext: SparkContext, @transient private val existingSharedState: Option[SharedState], @transient private val parentSessionState: Option[SessionState], @transient private[sql] val extensions: SparkSessionExtensions, @transient private[sql] val initialSessionOptions: Map[String, String]) extends Serializable with Closeable with Logging { self => // The call site where this SparkSession was constructed. private val creationSite: CallSite = Utils.getCallSite() /** * Constructor used in Pyspark. Contains explicit application of Spark Session Extensions * which otherwise only occurs during getOrCreate. We cannot add this to the default constructor * since that would cause every new session to reinvoke Spark Session Extensions on the currently * running extensions. */ private[sql] def this( sc: SparkContext, initialSessionOptions: java.util.HashMap[String, String]) = { this(sc, None, None, SparkSession.applyExtensions(sc, new SparkSessionExtensions), initialSessionOptions.asScala.toMap) } private[sql] def this(sc: SparkContext) = this(sc, new java.util.HashMap[String, String]()) private[sql] val sessionUUID: String = UUID.randomUUID.toString sparkContext.assertNotStopped() // If there is no active SparkSession, uses the default SQL conf. Otherwise, use the session's. SQLConf.setSQLConfGetter(() => { SparkSession.getActiveSession.filterNot(_.sparkContext.isStopped).map(_.sessionState.conf) .getOrElse(SQLConf.getFallbackConf) }) /** * The version of Spark on which this application is running. * * @since 2.0.0 */ def version: String = SPARK_VERSION /* ----------------------- * | Session-related state | * ----------------------- */ /** * State shared across sessions, including the `SparkContext`, cached data, listener, * and a catalog that interacts with external systems. * * This is internal to Spark and there is no guarantee on interface stability. * * @since 2.2.0 */ @Unstable @transient lazy val sharedState: SharedState = { existingSharedState.getOrElse(new SharedState(sparkContext, initialSessionOptions)) } /** * State isolated across sessions, including SQL configurations, temporary tables, registered * functions, and everything else that accepts a [[org.apache.spark.sql.internal.SQLConf]]. * If `parentSessionState` is not null, the `SessionState` will be a copy of the parent. * * This is internal to Spark and there is no guarantee on interface stability. * * @since 2.2.0 */ @Unstable @transient lazy val sessionState: SessionState = { parentSessionState .map(_.clone(this)) .getOrElse { val state = SparkSession.instantiateSessionState( SparkSession.sessionStateClassName(sharedState.conf), self) state } } /** * A wrapped version of this session in the form of a [[SQLContext]], for backward compatibility. * * @since 2.0.0 */ @transient val sqlContext: SQLContext = new SQLContext(this) /** * Runtime configuration interface for Spark. * * This is the interface through which the user can get and set all Spark and Hadoop * configurations that are relevant to Spark SQL. When getting the value of a config, * this defaults to the value set in the underlying `SparkContext`, if any. * * @since 2.0.0 */ @transient lazy val conf: RuntimeConfig = new RuntimeConfig(sessionState.conf) /** * An interface to register custom [[org.apache.spark.sql.util.QueryExecutionListener]]s * that listen for execution metrics. 
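 *
 * A minimal sketch, assuming an existing session `spark` (the listener body is illustrative):
 * {{{
 *   import org.apache.spark.sql.execution.QueryExecution
 *   import org.apache.spark.sql.util.QueryExecutionListener
 *
 *   spark.listenerManager.register(new QueryExecutionListener {
 *     override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit =
 *       println(s"$funcName finished in ${durationNs / 1000000} ms")
 *     override def onFailure(funcName: String, qe: QueryExecution, exception: Exception): Unit =
 *       println(s"$funcName failed: ${exception.getMessage}")
 *   })
 * }}}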
* * @since 2.0.0 */ def listenerManager: ExecutionListenerManager = sessionState.listenerManager /** * :: Experimental :: * A collection of methods that are considered experimental, but can be used to hook into * the query planner for advanced functionality. * * @since 2.0.0 */ @Experimental @Unstable def experimental: ExperimentalMethods = sessionState.experimentalMethods /** * A collection of methods for registering user-defined functions (UDF). * * The following example registers a Scala closure as UDF: * {{{ * sparkSession.udf.register("myUDF", (arg1: Int, arg2: String) => arg2 + arg1) * }}} * * The following example registers a UDF in Java: * {{{ * sparkSession.udf().register("myUDF", * (Integer arg1, String arg2) -> arg2 + arg1, * DataTypes.StringType); * }}} * * @note The user-defined functions must be deterministic. Due to optimization, * duplicate invocations may be eliminated or the function may even be invoked more times than * it is present in the query. * * @since 2.0.0 */ def udf: UDFRegistration = sessionState.udfRegistration def udtf: UDTFRegistration = sessionState.udtfRegistration /** * Returns a `StreamingQueryManager` that allows managing all the * `StreamingQuery`s active on `this`. * * @since 2.0.0 */ @Unstable def streams: StreamingQueryManager = sessionState.streamingQueryManager /** * Start a new session with isolated SQL configurations, temporary tables, registered * functions are isolated, but sharing the underlying `SparkContext` and cached data. * * @note Other than the `SparkContext`, all shared state is initialized lazily. * This method will force the initialization of the shared state to ensure that parent * and child sessions are set up with the same shared state. If the underlying catalog * implementation is Hive, this will initialize the metastore, which may take some time. * * @since 2.0.0 */ def newSession(): SparkSession = { new SparkSession( sparkContext, Some(sharedState), parentSessionState = None, extensions, initialSessionOptions) } /** * Create an identical copy of this `SparkSession`, sharing the underlying `SparkContext` * and shared state. All the state of this session (i.e. SQL configurations, temporary tables, * registered functions) is copied over, and the cloned session is set up with the same shared * state as this session. The cloned session is independent of this session, that is, any * non-global change in either session is not reflected in the other. * * @note Other than the `SparkContext`, all shared state is initialized lazily. * This method will force the initialization of the shared state to ensure that parent * and child sessions are set up with the same shared state. If the underlying catalog * implementation is Hive, this will initialize the metastore, which may take some time. */ private[sql] def cloneSession(): SparkSession = { val result = new SparkSession( sparkContext, Some(sharedState), Some(sessionState), extensions, Map.empty) result.sessionState // force copy of SessionState result } /* --------------------------------- * | Methods for creating DataFrames | * --------------------------------- */ /** * Returns a `DataFrame` with no rows or columns. * * @since 2.0.0 */ @transient lazy val emptyDataFrame: DataFrame = Dataset.ofRows(self, LocalRelation()) /** * Creates a new [[Dataset]] of type T containing zero elements. 
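 *
 * A minimal sketch, assuming an existing session `spark`:
 * {{{
 *   import spark.implicits._
 *   val ds = spark.emptyDataset[String]
 *   ds.count()  // 0
 * }}}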
* * @since 2.0.0 */ def emptyDataset[T: Encoder]: Dataset[T] = { val encoder = implicitly[Encoder[T]] new Dataset(self, LocalRelation(encoder.schema), encoder) } /** * Creates a `DataFrame` from an RDD of Product (e.g. case classes, tuples). * * @since 2.0.0 */ def createDataFrame[A <: Product : TypeTag](rdd: RDD[A]): DataFrame = withActive { val encoder = Encoders.product[A] Dataset.ofRows(self, ExternalRDD(rdd, self)(encoder)) } /** * Creates a `DataFrame` from a local Seq of Product. * * @since 2.0.0 */ def createDataFrame[A <: Product : TypeTag](data: Seq[A]): DataFrame = withActive { val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType] val attributeSeq = toAttributes(schema) Dataset.ofRows(self, LocalRelation.fromProduct(attributeSeq, data)) } /** * :: DeveloperApi :: * Creates a `DataFrame` from an `RDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * Example: * {{{ * import org.apache.spark.sql._ * import org.apache.spark.sql.types._ * val sparkSession = new org.apache.spark.sql.SparkSession(sc) * * val schema = * StructType( * StructField("name", StringType, false) :: * StructField("age", IntegerType, true) :: Nil) * * val people = * sc.textFile("examples/src/main/resources/people.txt").map( * _.split(",")).map(p => Row(p(0), p(1).trim.toInt)) * val dataFrame = sparkSession.createDataFrame(people, schema) * dataFrame.printSchema * // root * // |-- name: string (nullable = false) * // |-- age: integer (nullable = true) * * dataFrame.createOrReplaceTempView("people") * sparkSession.sql("select name from people").collect.foreach(println) * }}} * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rowRDD: RDD[Row], schema: StructType): DataFrame = withActive { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] // TODO: use MutableProjection when rowRDD is another DataFrame and the applied // schema differs from the existing schema on any field data type. val encoder = ExpressionEncoder(replaced) val toRow = encoder.createSerializer() val catalystRows = rowRDD.map(toRow) internalCreateDataFrame(catalystRows.setName(rowRDD.name), schema) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `JavaRDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rowRDD: JavaRDD[Row], schema: StructType): DataFrame = { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] createDataFrame(rowRDD.rdd, replaced) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `java.util.List` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided List matches * the provided schema. Otherwise, there will be runtime exception. * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rows: java.util.List[Row], schema: StructType): DataFrame = withActive { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] Dataset.ofRows(self, LocalRelation.fromExternalRows(toAttributes(replaced), rows.asScala.toSeq)) } /** * Applies a schema to an RDD of Java Beans. 
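 *
 * A minimal sketch, where `Person` stands for any Java Bean class with getters and setters:
 * {{{
 *   val beans = spark.sparkContext.parallelize(Seq(new Person("Alice", 29)))
 *   val df = spark.createDataFrame(beans, classOf[Person])
 *   df.show()
 * }}}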
* * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * * @since 2.0.0 */ def createDataFrame(rdd: RDD[_], beanClass: Class[_]): DataFrame = withActive { val attributeSeq: Seq[AttributeReference] = getSchema(beanClass) val className = beanClass.getName val rowRdd = rdd.mapPartitions { iter => // BeanInfo is not serializable so we must rediscover it remotely for each partition. SQLContext.beansToRows(iter, Utils.classForName(className), attributeSeq) } Dataset.ofRows(self, LogicalRDD(attributeSeq, rowRdd.setName(rdd.name))(self)) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * * @since 2.0.0 */ def createDataFrame(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame = { createDataFrame(rdd.rdd, beanClass) } /** * Applies a schema to a List of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * @since 1.6.0 */ def createDataFrame(data: java.util.List[_], beanClass: Class[_]): DataFrame = withActive { val attrSeq = getSchema(beanClass) val rows = SQLContext.beansToRows(data.asScala.iterator, beanClass, attrSeq) Dataset.ofRows(self, LocalRelation(attrSeq, rows.toSeq)) } /** * Convert a `BaseRelation` created for external data sources into a `DataFrame`. * * @since 2.0.0 */ def baseRelationToDataFrame(baseRelation: BaseRelation): DataFrame = { Dataset.ofRows(self, LogicalRelation(baseRelation)) } /* ------------------------------- * | Methods for creating DataSets | * ------------------------------- */ /** * Creates a [[Dataset]] from a local Seq of data of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * == Example == * * {{{ * * import spark.implicits._ * case class Person(name: String, age: Long) * val data = Seq(Person("Michael", 29), Person("Andy", 30), Person("Justin", 19)) * val ds = spark.createDataset(data) * * ds.show() * // +-------+---+ * // | name|age| * // +-------+---+ * // |Michael| 29| * // | Andy| 30| * // | Justin| 19| * // +-------+---+ * }}} * * @since 2.0.0 */ def createDataset[T : Encoder](data: Seq[T]): Dataset[T] = { val enc = encoderFor[T] val toRow = enc.createSerializer() val attributes = toAttributes(enc.schema) val encoded = data.map(d => toRow(d).copy()) val plan = new LocalRelation(attributes, encoded) Dataset[T](self, plan) } /** * Creates a [[Dataset]] from an RDD of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * @since 2.0.0 */ def createDataset[T : Encoder](data: RDD[T]): Dataset[T] = { Dataset[T](self, ExternalRDD(data, self)) } /** * Creates a [[Dataset]] from a `java.util.List` of a given type. 
This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * == Java Example == * * {{{ * List<String> data = Arrays.asList("hello", "world"); * Dataset<String> ds = spark.createDataset(data, Encoders.STRING()); * }}} * * @since 2.0.0 */ def createDataset[T : Encoder](data: java.util.List[T]): Dataset[T] = { createDataset(data.asScala.toSeq) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from 0 to `end` (exclusive) with step value 1. * * @since 2.0.0 */ def range(end: Long): Dataset[java.lang.Long] = range(0, end) /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with step value 1. * * @since 2.0.0 */ def range(start: Long, end: Long): Dataset[java.lang.Long] = { range(start, end, step = 1, numPartitions = leafNodeDefaultParallelism) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with a step value. * * @since 2.0.0 */ def range(start: Long, end: Long, step: Long): Dataset[java.lang.Long] = { range(start, end, step, numPartitions = leafNodeDefaultParallelism) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with a step value, with partition number * specified. * * @since 2.0.0 */ def range(start: Long, end: Long, step: Long, numPartitions: Int): Dataset[java.lang.Long] = { new Dataset(self, Range(start, end, step, numPartitions), Encoders.LONG) } /** * Creates a `DataFrame` from an `RDD[InternalRow]`. */ private[sql] def internalCreateDataFrame( catalystRows: RDD[InternalRow], schema: StructType, isStreaming: Boolean = false): DataFrame = { // TODO: use MutableProjection when rowRDD is another DataFrame and the applied // schema differs from the existing schema on any field data type. val logicalPlan = LogicalRDD( toAttributes(schema), catalystRows, isStreaming = isStreaming)(self) Dataset.ofRows(self, logicalPlan) } /* ------------------------- * | Catalog-related methods | * ------------------------- */ /** * Interface through which the user may create, drop, alter or query underlying * databases, tables, functions etc. * * @since 2.0.0 */ @transient lazy val catalog: Catalog = new CatalogImpl(self) /** * Returns the specified table/view as a `DataFrame`. If it's a table, it must support batch * reading and the returned DataFrame is the batch scan query plan of this table. If it's a view, * the returned DataFrame is simply the query plan of the view, which can either be a batch or * streaming query plan. * * @param tableName is either a qualified or unqualified name that designates a table or view. * If a database is specified, it identifies the table/view from the database. * Otherwise, it first attempts to find a temporary view with the given name * and then match the table/view from the current database. * Note that, the global temporary view database is also valid here. 
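 *
 * A minimal sketch, assuming an existing session `spark`:
 * {{{
 *   spark.range(3).createOrReplaceTempView("nums")
 *   spark.table("nums").count()  // 3
 * }}}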
* @since 2.0.0 */ def table(tableName: String): DataFrame = { read.table(tableName) } private[sql] def table(tableIdent: TableIdentifier): DataFrame = { Dataset.ofRows(self, UnresolvedRelation(tableIdent)) } /* ----------------- * | Everything else | * ----------------- */ /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. * @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql(sqlText: String, args: Array[_], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { PosParameterizedQuery(parsedPlan, args.map(lit(_).expr)) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. * @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.5.0 */ @Experimental def sql(sqlText: String, args: Array[_]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql( sqlText: String, args: Map[String, Any], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { NameParameterizedQuery(parsedPlan, args.mapValues(lit(_).expr).toMap) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. 
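 *
 * A minimal sketch, assuming an existing session `spark` and the Spark 3.4+ named-parameter syntax:
 * {{{
 *   spark.sql(
 *     "SELECT * FROM VALUES (1, 'a'), (2, 'b') AS t(id, name) WHERE id = :id",
 *     Map("id" -> 2)).show()
 * }}}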
* This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: Map[String, Any]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: java.util.Map[String, Any]): DataFrame = { sql(sqlText, args.asScala.toMap) } /** * Executes a SQL query using Spark, returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @since 2.0.0 */ def sql(sqlText: String): DataFrame = sql(sqlText, Map.empty[String, Any]) /** * Execute an arbitrary string command inside an external execution engine rather than Spark. * This could be useful when user wants to execute some commands out of Spark. For * example, executing custom DDL/DML command for JDBC, creating index for ElasticSearch, * creating cores for Solr and so on. * * The command will be eagerly executed after this method is called and the returned * DataFrame will contain the output of the command(if any). * * @param runner The class name of the runner that implements `ExternalCommandRunner`. * @param command The target command to be executed * @param options The options for the runner. * * @since 3.0.0 */ @Unstable def executeCommand(runner: String, command: String, options: Map[String, String]): DataFrame = { DataSource.lookupDataSource(runner, sessionState.conf) match { case source if classOf[ExternalCommandRunner].isAssignableFrom(source) => Dataset.ofRows(self, ExternalCommandExecutor( source.newInstance().asInstanceOf[ExternalCommandRunner], command, options)) case _ => throw QueryCompilationErrors.commandExecutionInRunnerUnsupportedError(runner) } } /** * Returns a [[DataFrameReader]] that can be used to read non-streaming data in as a * `DataFrame`. * {{{ * sparkSession.read.parquet("/path/to/file.parquet") * sparkSession.read.schema(schema).json("/path/to/file.json") * }}} * * @since 2.0.0 */ def read: DataFrameReader = new DataFrameReader(self) /** * Returns a `DataStreamReader` that can be used to read streaming data in as a `DataFrame`. 
* {{{ * sparkSession.readStream.parquet("/path/to/directory/of/parquet/files") * sparkSession.readStream.schema(schema).json("/path/to/directory/of/json/files") * }}} * * @since 2.0.0 */ def readStream: DataStreamReader = new DataStreamReader(self) /** * Executes some code block and prints to stdout the time taken to execute the block. This is * available in Scala only and is used primarily for interactive testing and debugging. * * @since 2.1.0 */ def time[T](f: => T): T = { val start = System.nanoTime() val ret = f val end = System.nanoTime() // scalastyle:off println println(s"Time taken: ${NANOSECONDS.toMillis(end - start)} ms") // scalastyle:on println ret } // scalastyle:off // Disable style checker so "implicits" object can start with lowercase i /** * (Scala-specific) Implicit methods available in Scala for converting * common Scala objects into `DataFrame`s. * * {{{ * val sparkSession = SparkSession.builder.getOrCreate() * import sparkSession.implicits._ * }}} * * @since 2.0.0 */ object implicits extends SQLImplicits with Serializable { protected override def _sqlContext: SQLContext = SparkSession.this.sqlContext } // scalastyle:on /** * Stop the underlying `SparkContext`. * * @since 2.0.0 */ def stop(): Unit = { sparkContext.stop() } /** * Synonym for `stop()`. * * @since 2.1.0 */ override def close(): Unit = stop() /** * Parses the data type in our internal string representation. The data type string should * have the same format as the one generated by `toString` in scala. * It is only used by PySpark. */ protected[sql] def parseDataType(dataTypeString: String): DataType = { DataType.fromJson(dataTypeString) } /** * Apply a schema defined by the schemaString to an RDD. It is only used by PySpark. */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schemaString: String): DataFrame = { val schema = DataType.fromJson(schemaString).asInstanceOf[StructType] applySchemaToPythonRDD(rdd, schema) } /** * Apply `schema` to an RDD. * * @note Used by PySpark only */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schema: StructType): DataFrame = { val rowRdd = rdd.mapPartitions { iter => val fromJava = python.EvaluatePython.makeFromJava(schema) iter.map(r => fromJava(r).asInstanceOf[InternalRow]) } internalCreateDataFrame(rowRdd, schema) } /** * Returns a Catalyst Schema for the given java bean class. */ private def getSchema(beanClass: Class[_]): Seq[AttributeReference] = { val (dataType, _) = JavaTypeInference.inferDataType(beanClass) dataType.asInstanceOf[StructType].fields.map { f => AttributeReference(f.name, f.dataType, f.nullable)() } } /** * Execute a block of code with the this session set as the active session, and restore the * previous session on completion. */ private[sql] def withActive[T](block: => T): T = { // Use the active session thread local directly to make sure we get the session that is actually // set and not the default session. This to prevent that we promote the default session to the // active session once we are done. val old = SparkSession.activeThreadSession.get() SparkSession.setActiveSession(this) try block finally { SparkSession.setActiveSession(old) } } private[sql] def leafNodeDefaultParallelism: Int = { conf.get(SQLConf.LEAF_NODE_DEFAULT_PARALLELISM).getOrElse(sparkContext.defaultParallelism) }}@Stableobject SparkSession extends Logging { /** * Builder for [[SparkSession]]. 
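 *
 * A minimal sketch using the Map-based config overload (available since 3.4):
 * {{{
 *   val spark = SparkSession.builder()
 *     .master("local[*]")
 *     .appName("builder-demo")
 *     .config(Map("spark.sql.shuffle.partitions" -> 8, "spark.sql.session.timeZone" -> "UTC"))
 *     .getOrCreate()
 * }}}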
*/ @Stable class Builder extends Logging { private[this] val options = new scala.collection.mutable.HashMap[String, String] private[this] val extensions = new SparkSessionExtensions private[this] var userSuppliedContext: Option[SparkContext] = None private[spark] def sparkContext(sparkContext: SparkContext): Builder = synchronized { userSuppliedContext = Option(sparkContext) this } /** * Sets a name for the application, which will be shown in the Spark web UI. * If no application name is set, a randomly generated name will be used. * * @since 2.0.0 */ def appName(name: String): Builder = config("spark.app.name", name) /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: String): Builder = synchronized { options += key -> value this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Long): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Double): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Boolean): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: Map[String, Any]): Builder = synchronized { map.foreach { kv: (String, Any) => { options += kv._1 -> kv._2.toString } } this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: java.util.Map[String, Any]): Builder = synchronized { config(map.asScala.toMap) } /** * Sets a list of config options based on the given `SparkConf`. * * @since 2.0.0 */ def config(conf: SparkConf): Builder = synchronized { conf.getAll.foreach { case (k, v) => options += k -> v } this } /** * Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]" to * run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster. * * @since 2.0.0 */ def master(master: String): Builder = config("spark.master", master) /** * Enables Hive support, including connectivity to a persistent Hive metastore, support for * Hive serdes, and Hive user-defined functions. * * @since 2.0.0 */ def enableHiveSupport(): Builder = synchronized { if (hiveClassesArePresent) { config(CATALOG_IMPLEMENTATION.key, "hive") } else { throw new IllegalArgumentException( "Unable to instantiate SparkSession with Hive support because " + "Hive classes are not found.") } } /** * Inject extensions into the [[SparkSession]]. This allows a user to add Analyzer rules, * Optimizer rules, Planning Strategies or a customized parser. 
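 *
 * A minimal sketch, where `MyRule` stands for any custom `Rule[LogicalPlan]`:
 * {{{
 *   SparkSession.builder()
 *     .withExtensions { ext =>
 *       ext.injectOptimizerRule { session => MyRule(session) }
 *     }
 *     .getOrCreate()
 * }}}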
* * @since 2.2.0 */ def withExtensions(f: SparkSessionExtensions => Unit): Builder = synchronized { f(extensions) this } /** * Gets an existing [[SparkSession]] or, if there is no existing one, creates a new * one based on the options set in this builder. * * This method first checks whether there is a valid thread-local SparkSession, * and if yes, return that one. It then checks whether there is a valid global * default SparkSession, and if yes, return that one. If no valid global default * SparkSession exists, the method creates a new SparkSession and assigns the * newly created SparkSession as the global default. * * In case an existing SparkSession is returned, the non-static config options specified in * this builder will be applied to the existing SparkSession. * * @since 2.0.0 */ def getOrCreate(): SparkSession = synchronized { val sparkConf = new SparkConf() options.foreach { case (k, v) => sparkConf.set(k, v) } if (!sparkConf.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { assertOnDriver() } // Get the session from current thread's active session. var session = activeThreadSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // Global synchronization so we will only set the default session once. SparkSession.synchronized { // If the current thread does not have an active session, get it from the global session. session = defaultSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // No active nor global default session. Create a new one. val sparkContext = userSuppliedContext.getOrElse { // set a random app name if not given. if (!sparkConf.contains("spark.app.name")) { sparkConf.setAppName(java.util.UUID.randomUUID().toString) } SparkContext.getOrCreate(sparkConf) // Do not update `SparkConf` for existing `SparkContext`, as it's shared by all sessions. } loadExtensions(extensions) applyExtensions(sparkContext, extensions) session = new SparkSession(sparkContext, None, None, extensions, options.toMap) setDefaultSession(session) setActiveSession(session) registerContextListener(sparkContext) } return session } } /** * Creates a [[SparkSession.Builder]] for constructing a [[SparkSession]]. * * @since 2.0.0 */ def builder(): Builder = new Builder /** * Changes the SparkSession that will be returned in this thread and its children when * SparkSession.getOrCreate() is called. This can be used to ensure that a given thread receives * a SparkSession with an isolated session, instead of the global (first created) context. * * @since 2.0.0 */ def setActiveSession(session: SparkSession): Unit = { activeThreadSession.set(session) } /** * Clears the active SparkSession for current thread. Subsequent calls to getOrCreate will * return the first created context instead of a thread-local override. * * @since 2.0.0 */ def clearActiveSession(): Unit = { activeThreadSession.remove() } /** * Sets the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def setDefaultSession(session: SparkSession): Unit = { defaultSession.set(session) } /** * Clears the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def clearDefaultSession(): Unit = { defaultSession.set(null) } /** * Returns the active SparkSession for the current thread, returned by the builder. 
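 *
 * A minimal sketch:
 * {{{
 *   val base = SparkSession.builder().master("local[*]").getOrCreate()
 *   val isolated = base.newSession()
 *   SparkSession.setActiveSession(isolated)
 *   assert(SparkSession.getActiveSession.contains(isolated))
 *   SparkSession.clearActiveSession()
 * }}}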
* * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getActiveSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(activeThreadSession.get) } } /** * Returns the default SparkSession that is returned by the builder. * * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getDefaultSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(defaultSession.get) } } /** * Returns the currently active SparkSession, otherwise the default one. If there is no default * SparkSession, throws an exception. * * @since 2.4.0 */ def active: SparkSession = { getActiveSession.getOrElse(getDefaultSession.getOrElse( throw new IllegalStateException("No active or default Spark session found"))) } /** * Apply modifiable settings to an existing [[SparkSession]]. This method are used * both in Scala and Python, so put this under [[SparkSession]] object. */ private[sql] def applyModifiableSettings( session: SparkSession, options: java.util.HashMap[String, String]): Unit = { // Lazy val to avoid an unnecessary session state initialization lazy val conf = session.sessionState.conf val dedupOptions = if (options.isEmpty) Map.empty[String, String] else ( options.asScala.toSet -- conf.getAllConfs.toSet).toMap val (staticConfs, otherConfs) = dedupOptions.partition(kv => SQLConf.isStaticConfigKey(kv._1)) otherConfs.foreach { case (k, v) => conf.setConfString(k, v) } // Note that other runtime SQL options, for example, for other third-party datasource // can be marked as an ignored configuration here. val maybeIgnoredConfs = otherConfs.filterNot { case (k, _) => conf.isModifiable(k) } if (staticConfs.nonEmpty || maybeIgnoredConfs.nonEmpty) { logWarning( "Using an existing Spark session; only runtime SQL configurations will take effect.") } if (staticConfs.nonEmpty) { logDebug("Ignored static SQL configurations:\n " + conf.redactOptions(staticConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } if (maybeIgnoredConfs.nonEmpty) { // Only print out non-static and non-runtime SQL configurations. // Note that this might show core configurations or source specific // options defined in the third-party datasource. logDebug("Configurations that might not take effect:\n " + conf.redactOptions( maybeIgnoredConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } } /** * Returns a cloned SparkSession with all specified configurations disabled, or * the original SparkSession if all configurations are already disabled. 
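 *
 * A minimal sketch of internal usage, where `SQLConf.ADAPTIVE_EXECUTION_ENABLED` stands in for
 * any boolean config entry:
 * {{{
 *   val session = SparkSession.getOrCloneSessionWithConfigsOff(
 *     spark, Seq(SQLConf.ADAPTIVE_EXECUTION_ENABLED))
 * }}}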
*/ private[sql] def getOrCloneSessionWithConfigsOff( session: SparkSession, configurations: Seq[ConfigEntry[Boolean]]): SparkSession = { val configsEnabled = configurations.filter(session.conf.get[Boolean]) if (configsEnabled.isEmpty) { session } else { val newSession = session.cloneSession() configsEnabled.foreach(conf => { newSession.conf.set(conf, false) }) newSession } } //////////////////////////////////////////////////////////////////////////////////////// // Private methods from now on //////////////////////////////////////////////////////////////////////////////////////// private val listenerRegistered: AtomicBoolean = new AtomicBoolean(false) /** Register the AppEnd listener onto the Context */ private def registerContextListener(sparkContext: SparkContext): Unit = { if (!listenerRegistered.get()) { sparkContext.addSparkListener(new SparkListener { override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = { defaultSession.set(null) listenerRegistered.set(false) } }) listenerRegistered.set(true) } } /** The active SparkSession for the current thread. */ private val activeThreadSession = new InheritableThreadLocal[SparkSession] /** Reference to the root SparkSession. */ private val defaultSession = new AtomicReference[SparkSession] private val HIVE_SESSION_STATE_BUILDER_CLASS_NAME = "org.apache.spark.sql.hive.HiveSessionStateBuilder" private def sessionStateClassName(conf: SparkConf): String = { conf.get(CATALOG_IMPLEMENTATION) match { case "hive" => HIVE_SESSION_STATE_BUILDER_CLASS_NAME case "in-memory" => classOf[SessionStateBuilder].getCanonicalName } } private def assertOnDriver(): Unit = { if (TaskContext.get != null) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkSession should only be created and accessed on the driver.") } } /** * Helper method to create an instance of `SessionState` based on `className` from conf. * The result is either `SessionState` or a Hive based `SessionState`. */ private def instantiateSessionState( className: String, sparkSession: SparkSession): SessionState = { try { // invoke new [Hive]SessionStateBuilder( // SparkSession, // Option[SessionState]) val clazz = Utils.classForName(className) val ctor = clazz.getConstructors.head ctor.newInstance(sparkSession, None).asInstanceOf[BaseSessionStateBuilder].build() } catch { case NonFatal(e) => throw new IllegalArgumentException(s"Error while instantiating '$className':", e) } } /** * @return true if Hive classes can be loaded, otherwise false. */ private[spark] def hiveClassesArePresent: Boolean = { try { Utils.classForName(HIVE_SESSION_STATE_BUILDER_CLASS_NAME) Utils.classForName("org.apache.hadoop.hive.conf.HiveConf") true } catch { case _: ClassNotFoundException | _: NoClassDefFoundError => false } } private[spark] def cleanupAnyExistingSession(): Unit = { val session = getActiveSession.orElse(getDefaultSession) if (session.isDefined) { logWarning( s"""An existing Spark session exists as the active or default session. |This probably means another suite leaked it. Attempting to stop it before continuing. |This existing Spark session was created at: | |${session.get.creationSite.longForm} | """.stripMargin) session.get.stop() SparkSession.clearActiveSession() SparkSession.clearDefaultSession() } } /** * Initialize extensions specified in [[StaticSQLConf]]. The classes will be applied to the * extensions passed into this function. 
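 *
 * A minimal sketch of an extensions class that can be named in `spark.sql.extensions`
 * (the injected `MyRule` is illustrative):
 * {{{
 *   class MyExtensions extends (SparkSessionExtensions => Unit) {
 *     override def apply(ext: SparkSessionExtensions): Unit =
 *       ext.injectOptimizerRule(session => MyRule(session))
 *   }
 * }}}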
 */ private def applyExtensions( sparkContext: SparkContext, extensions: SparkSessionExtensions): SparkSessionExtensions = { val extensionConfClassNames = sparkContext.getConf.get(StaticSQLConf.SPARK_SESSION_EXTENSIONS) .getOrElse(Seq.empty) extensionConfClassNames.foreach { extensionConfClassName => try { val extensionConfClass = Utils.classForName(extensionConfClassName) val extensionConf = extensionConfClass.getConstructor().newInstance() .asInstanceOf[SparkSessionExtensions => Unit] extensionConf(extensions) } catch { // Ignore the error if we cannot find the class or when the class has the wrong type. case e@(_: ClassCastException | _: ClassNotFoundException | _: NoClassDefFoundError) => logWarning(s"Cannot use $extensionConfClassName to configure session extensions.", e) } } extensions } /** * Load extensions from [[ServiceLoader]] and use them */ private def loadExtensions(extensions: SparkSessionExtensions): Unit = { val loader = ServiceLoader.load(classOf[SparkSessionExtensionsProvider], Utils.getContextOrSparkClassLoader) val loadedExts = loader.iterator() while (loadedExts.hasNext) { try { val ext = loadedExts.next() ext(extensions) } catch { case e: Throwable => logWarning("Failed to load session extension", e) } } }}
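// A minimal sketch of the ServiceLoader path used by loadExtensions above: a provider listed in
// META-INF/services/org.apache.spark.sql.SparkSessionExtensionsProvider is discovered and applied
// automatically. The injected rule is omitted; `MyProviderRule` would be a custom Rule[LogicalPlan].
class MyExtensionsProvider extends SparkSessionExtensionsProvider {
  override def apply(extensions: SparkSessionExtensions): Unit = {
    // e.g. extensions.injectOptimizerRule(session => MyProviderRule(session))
  }
}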
* @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql(sqlText: String, args: Array[_], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { PosParameterizedQuery(parsedPlan, args.map(lit(_).expr)) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. * @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.5.0 */ @Experimental def sql(sqlText: String, args: Array[_]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql( sqlText: String, args: Map[String, Any], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { NameParameterizedQuery(parsedPlan, args.mapValues(lit(_).expr).toMap) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. 
* * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: Map[String, Any]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: java.util.Map[String, Any]): DataFrame = { sql(sqlText, args.asScala.toMap) } /** * Executes a SQL query using Spark, returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @since 2.0.0 */ def sql(sqlText: String): DataFrame = sql(sqlText, Map.empty[String, Any]) /** * Execute an arbitrary string command inside an external execution engine rather than Spark. * This could be useful when user wants to execute some commands out of Spark. For * example, executing custom DDL/DML command for JDBC, creating index for ElasticSearch, * creating cores for Solr and so on. * * The command will be eagerly executed after this method is called and the returned * DataFrame will contain the output of the command(if any). * * @param runner The class name of the runner that implements `ExternalCommandRunner`. * @param command The target command to be executed * @param options The options for the runner. * * @since 3.0.0 */ @Unstable def executeCommand(runner: String, command: String, options: Map[String, String]): DataFrame = { DataSource.lookupDataSource(runner, sessionState.conf) match { case source if classOf[ExternalCommandRunner].isAssignableFrom(source) => Dataset.ofRows(self, ExternalCommandExecutor( source.newInstance().asInstanceOf[ExternalCommandRunner], command, options)) case _ => throw QueryCompilationErrors.commandExecutionInRunnerUnsupportedError(runner) } } /** * Returns a [[DataFrameReader]] that can be used to read non-streaming data in as a * `DataFrame`. * {{{ * sparkSession.read.parquet("/path/to/file.parquet") * sparkSession.read.schema(schema).json("/path/to/file.json") * }}} * * @since 2.0.0 */ def read: DataFrameReader = new DataFrameReader(self) /** * Returns a `DataStreamReader` that can be used to read streaming data in as a `DataFrame`. * {{{ * sparkSession.readStream.parquet("/path/to/directory/of/parquet/files") * sparkSession.readStream.schema(schema).json("/path/to/directory/of/json/files") * }}} * * @since 2.0.0 */ def readStream: DataStreamReader = new DataStreamReader(self) /** * Executes some code block and prints to stdout the time taken to execute the block. This is * available in Scala only and is used primarily for interactive testing and debugging. 
* * @since 2.1.0 */ def time[T](f: => T): T = { val start = System.nanoTime() val ret = f val end = System.nanoTime() // scalastyle:off println println(s"Time taken: ${NANOSECONDS.toMillis(end - start)} ms") // scalastyle:on println ret } // scalastyle:off // Disable style checker so "implicits" object can start with lowercase i /** * (Scala-specific) Implicit methods available in Scala for converting * common Scala objects into `DataFrame`s. * * {{{ * val sparkSession = SparkSession.builder.getOrCreate() * import sparkSession.implicits._ * }}} * * @since 2.0.0 */ object implicits extends SQLImplicits with Serializable { protected override def _sqlContext: SQLContext = SparkSession.this.sqlContext } // scalastyle:on /** * Stop the underlying `SparkContext`. * * @since 2.0.0 */ def stop(): Unit = { sparkContext.stop() } /** * Synonym for `stop()`. * * @since 2.1.0 */ override def close(): Unit = stop() /** * Parses the data type in our internal string representation. The data type string should * have the same format as the one generated by `toString` in scala. * It is only used by PySpark. */ protected[sql] def parseDataType(dataTypeString: String): DataType = { DataType.fromJson(dataTypeString) } /** * Apply a schema defined by the schemaString to an RDD. It is only used by PySpark. */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schemaString: String): DataFrame = { val schema = DataType.fromJson(schemaString).asInstanceOf[StructType] applySchemaToPythonRDD(rdd, schema) } /** * Apply `schema` to an RDD. * * @note Used by PySpark only */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schema: StructType): DataFrame = { val rowRdd = rdd.mapPartitions { iter => val fromJava = python.EvaluatePython.makeFromJava(schema) iter.map(r => fromJava(r).asInstanceOf[InternalRow]) } internalCreateDataFrame(rowRdd, schema) } /** * Returns a Catalyst Schema for the given java bean class. */ private def getSchema(beanClass: Class[_]): Seq[AttributeReference] = { val (dataType, _) = JavaTypeInference.inferDataType(beanClass) dataType.asInstanceOf[StructType].fields.map { f => AttributeReference(f.name, f.dataType, f.nullable)() } } /** * Execute a block of code with the this session set as the active session, and restore the * previous session on completion. */ private[sql] def withActive[T](block: => T): T = { // Use the active session thread local directly to make sure we get the session that is actually // set and not the default session. This to prevent that we promote the default session to the // active session once we are done. val old = SparkSession.activeThreadSession.get() SparkSession.setActiveSession(this) try block finally { SparkSession.setActiveSession(old) } } private[sql] def leafNodeDefaultParallelism: Int = { conf.get(SQLConf.LEAF_NODE_DEFAULT_PARALLELISM).getOrElse(sparkContext.defaultParallelism) }}@Stableobject SparkSession extends Logging { /** * Builder for [[SparkSession]]. */ @Stable class Builder extends Logging { private[this] val options = new scala.collection.mutable.HashMap[String, String] private[this] val extensions = new SparkSessionExtensions private[this] var userSuppliedContext: Option[SparkContext] = None private[spark] def sparkContext(sparkContext: SparkContext): Builder = synchronized { userSuppliedContext = Option(sparkContext) this } /** * Sets a name for the application, which will be shown in the Spark web UI. * If no application name is set, a randomly generated name will be used. 
* * @since 2.0.0 */ def appName(name: String): Builder = config("spark.app.name", name) /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: String): Builder = synchronized { options += key -> value this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Long): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Double): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Boolean): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: Map[String, Any]): Builder = synchronized { map.foreach { kv: (String, Any) => { options += kv._1 -> kv._2.toString } } this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: java.util.Map[String, Any]): Builder = synchronized { config(map.asScala.toMap) } /** * Sets a list of config options based on the given `SparkConf`. * * @since 2.0.0 */ def config(conf: SparkConf): Builder = synchronized { conf.getAll.foreach { case (k, v) => options += k -> v } this } /** * Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]" to * run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster. * * @since 2.0.0 */ def master(master: String): Builder = config("spark.master", master) /** * Enables Hive support, including connectivity to a persistent Hive metastore, support for * Hive serdes, and Hive user-defined functions. * * @since 2.0.0 */ def enableHiveSupport(): Builder = synchronized { if (hiveClassesArePresent) { config(CATALOG_IMPLEMENTATION.key, "hive") } else { throw new IllegalArgumentException( "Unable to instantiate SparkSession with Hive support because " + "Hive classes are not found.") } } /** * Inject extensions into the [[SparkSession]]. This allows a user to add Analyzer rules, * Optimizer rules, Planning Strategies or a customized parser. * * @since 2.2.0 */ def withExtensions(f: SparkSessionExtensions => Unit): Builder = synchronized { f(extensions) this } /** * Gets an existing [[SparkSession]] or, if there is no existing one, creates a new * one based on the options set in this builder. * * This method first checks whether there is a valid thread-local SparkSession, * and if yes, return that one. It then checks whether there is a valid global * default SparkSession, and if yes, return that one. If no valid global default * SparkSession exists, the method creates a new SparkSession and assigns the * newly created SparkSession as the global default. 
* * In case an existing SparkSession is returned, the non-static config options specified in * this builder will be applied to the existing SparkSession. * * @since 2.0.0 */ def getOrCreate(): SparkSession = synchronized { val sparkConf = new SparkConf() options.foreach { case (k, v) => sparkConf.set(k, v) } if (!sparkConf.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { assertOnDriver() } // Get the session from current thread's active session. var session = activeThreadSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // Global synchronization so we will only set the default session once. SparkSession.synchronized { // If the current thread does not have an active session, get it from the global session. session = defaultSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // No active nor global default session. Create a new one. val sparkContext = userSuppliedContext.getOrElse { // set a random app name if not given. if (!sparkConf.contains("spark.app.name")) { sparkConf.setAppName(java.util.UUID.randomUUID().toString) } SparkContext.getOrCreate(sparkConf) // Do not update `SparkConf` for existing `SparkContext`, as it's shared by all sessions. } loadExtensions(extensions) applyExtensions(sparkContext, extensions) session = new SparkSession(sparkContext, None, None, extensions, options.toMap) setDefaultSession(session) setActiveSession(session) registerContextListener(sparkContext) } return session } } /** * Creates a [[SparkSession.Builder]] for constructing a [[SparkSession]]. * * @since 2.0.0 */ def builder(): Builder = new Builder /** * Changes the SparkSession that will be returned in this thread and its children when * SparkSession.getOrCreate() is called. This can be used to ensure that a given thread receives * a SparkSession with an isolated session, instead of the global (first created) context. * * @since 2.0.0 */ def setActiveSession(session: SparkSession): Unit = { activeThreadSession.set(session) } /** * Clears the active SparkSession for current thread. Subsequent calls to getOrCreate will * return the first created context instead of a thread-local override. * * @since 2.0.0 */ def clearActiveSession(): Unit = { activeThreadSession.remove() } /** * Sets the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def setDefaultSession(session: SparkSession): Unit = { defaultSession.set(session) } /** * Clears the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def clearDefaultSession(): Unit = { defaultSession.set(null) } /** * Returns the active SparkSession for the current thread, returned by the builder. * * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getActiveSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(activeThreadSession.get) } } /** * Returns the default SparkSession that is returned by the builder. * * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getDefaultSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(defaultSession.get) } } /** * Returns the currently active SparkSession, otherwise the default one. 
If there is no default * SparkSession, throws an exception. * * @since 2.4.0 */ def active: SparkSession = { getActiveSession.getOrElse(getDefaultSession.getOrElse( throw new IllegalStateException("No active or default Spark session found"))) } /** * Apply modifiable settings to an existing [[SparkSession]]. This method are used * both in Scala and Python, so put this under [[SparkSession]] object. */ private[sql] def applyModifiableSettings( session: SparkSession, options: java.util.HashMap[String, String]): Unit = { // Lazy val to avoid an unnecessary session state initialization lazy val conf = session.sessionState.conf val dedupOptions = if (options.isEmpty) Map.empty[String, String] else ( options.asScala.toSet -- conf.getAllConfs.toSet).toMap val (staticConfs, otherConfs) = dedupOptions.partition(kv => SQLConf.isStaticConfigKey(kv._1)) otherConfs.foreach { case (k, v) => conf.setConfString(k, v) } // Note that other runtime SQL options, for example, for other third-party datasource // can be marked as an ignored configuration here. val maybeIgnoredConfs = otherConfs.filterNot { case (k, _) => conf.isModifiable(k) } if (staticConfs.nonEmpty || maybeIgnoredConfs.nonEmpty) { logWarning( "Using an existing Spark session; only runtime SQL configurations will take effect.") } if (staticConfs.nonEmpty) { logDebug("Ignored static SQL configurations:\n " + conf.redactOptions(staticConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } if (maybeIgnoredConfs.nonEmpty) { // Only print out non-static and non-runtime SQL configurations. // Note that this might show core configurations or source specific // options defined in the third-party datasource. logDebug("Configurations that might not take effect:\n " + conf.redactOptions( maybeIgnoredConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } } /** * Returns a cloned SparkSession with all specified configurations disabled, or * the original SparkSession if all configurations are already disabled. */ private[sql] def getOrCloneSessionWithConfigsOff( session: SparkSession, configurations: Seq[ConfigEntry[Boolean]]): SparkSession = { val configsEnabled = configurations.filter(session.conf.get[Boolean]) if (configsEnabled.isEmpty) { session } else { val newSession = session.cloneSession() configsEnabled.foreach(conf => { newSession.conf.set(conf, false) }) newSession } } //////////////////////////////////////////////////////////////////////////////////////// // Private methods from now on //////////////////////////////////////////////////////////////////////////////////////// private val listenerRegistered: AtomicBoolean = new AtomicBoolean(false) /** Register the AppEnd listener onto the Context */ private def registerContextListener(sparkContext: SparkContext): Unit = { if (!listenerRegistered.get()) { sparkContext.addSparkListener(new SparkListener { override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = { defaultSession.set(null) listenerRegistered.set(false) } }) listenerRegistered.set(true) } } /** The active SparkSession for the current thread. */ private val activeThreadSession = new InheritableThreadLocal[SparkSession] /** Reference to the root SparkSession. 
*/ private val defaultSession = new AtomicReference[SparkSession] private val HIVE_SESSION_STATE_BUILDER_CLASS_NAME = "org.apache.spark.sql.hive.HiveSessionStateBuilder" private def sessionStateClassName(conf: SparkConf): String = { conf.get(CATALOG_IMPLEMENTATION) match { case "hive" => HIVE_SESSION_STATE_BUILDER_CLASS_NAME case "in-memory" => classOf[SessionStateBuilder].getCanonicalName } } private def assertOnDriver(): Unit = { if (TaskContext.get != null) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkSession should only be created and accessed on the driver.") } } /** * Helper method to create an instance of `SessionState` based on `className` from conf. * The result is either `SessionState` or a Hive based `SessionState`. */ private def instantiateSessionState( className: String, sparkSession: SparkSession): SessionState = { try { // invoke new [Hive]SessionStateBuilder( // SparkSession, // Option[SessionState]) val clazz = Utils.classForName(className) val ctor = clazz.getConstructors.head ctor.newInstance(sparkSession, None).asInstanceOf[BaseSessionStateBuilder].build() } catch { case NonFatal(e) => throw new IllegalArgumentException(s"Error while instantiating '$className':", e) } } /** * @return true if Hive classes can be loaded, otherwise false. */ private[spark] def hiveClassesArePresent: Boolean = { try { Utils.classForName(HIVE_SESSION_STATE_BUILDER_CLASS_NAME) Utils.classForName("org.apache.hadoop.hive.conf.HiveConf") true } catch { case _: ClassNotFoundException | _: NoClassDefFoundError => false } } private[spark] def cleanupAnyExistingSession(): Unit = { val session = getActiveSession.orElse(getDefaultSession) if (session.isDefined) { logWarning( s"""An existing Spark session exists as the active or default session. |This probably means another suite leaked it. Attempting to stop it before continuing. |This existing Spark session was created at: | |${session.get.creationSite.longForm} | """.stripMargin) session.get.stop() SparkSession.clearActiveSession() SparkSession.clearDefaultSession() } } /** * Initialize extensions specified in [[StaticSQLConf]]. The classes will be applied to the * extensions passed into this function. */ private def applyExtensions( sparkContext: SparkContext, extensions: SparkSessionExtensions): SparkSessionExtensions = { val extensionConfClassNames = sparkContext.getConf.get(StaticSQLConf.SPARK_SESSION_EXTENSIONS) .getOrElse(Seq.empty) extensionConfClassNames.foreach { extensionConfClassName => try { val extensionConfClass = Utils.classForName(extensionConfClassName) val extensionConf = extensionConfClass.getConstructor().newInstance() .asInstanceOf[SparkSessionExtensions => Unit] extensionConf(extensions) } catch { // Ignore the error if we cannot find the class or when the class has the wrong type. 
case e@(_: ClassCastException | _: ClassNotFoundException | _: NoClassDefFoundError) => logWarning(s"Cannot use $extensionConfClassName to configure session extensions.", e) } } extensions } /** * Load extensions from [[ServiceLoader]] and use them */ private def loadExtensions(extensions: SparkSessionExtensions): Unit = { val loader = ServiceLoader.load(classOf[SparkSessionExtensionsProvider], Utils.getContextOrSparkClassLoader) val loadedExts = loader.iterator() while (loadedExts.hasNext) { try { val ext = loadedExts.next() ext(extensions) } catch { case e: Throwable => logWarning("Failed to load session extension", e) } } }} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */package org.apache.spark.sqlimport java.io.Closeableimport java.util.{ServiceLoader, UUID}import java.util.concurrent.TimeUnit._import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}import scala.collection.JavaConverters._import scala.reflect.runtime.universe.TypeTagimport scala.util.control.NonFatalimport org.apache.spark.{SPARK_VERSION, SparkConf, SparkContext, TaskContext}import org.apache.spark.annotation.{DeveloperApi, Experimental, Stable, Unstable}import org.apache.spark.api.java.JavaRDDimport org.apache.spark.internal.Loggingimport org.apache.spark.internal.config.{ConfigEntry, EXECUTOR_ALLOW_SPARK_CONTEXT}import org.apache.spark.rdd.RDDimport org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}import org.apache.spark.sql.catalog.Catalogimport org.apache.spark.sql.catalyst._import org.apache.spark.sql.catalyst.analysis.{NameParameterizedQuery, PosParameterizedQuery, UnresolvedRelation}import org.apache.spark.sql.catalyst.encoders._import org.apache.spark.sql.catalyst.expressions.AttributeReferenceimport org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, Range}import org.apache.spark.sql.catalyst.types.DataTypeUtils.toAttributesimport org.apache.spark.sql.catalyst.util.CharVarcharUtilsimport org.apache.spark.sql.connector.ExternalCommandRunnerimport org.apache.spark.sql.errors.QueryCompilationErrorsimport org.apache.spark.sql.execution._import org.apache.spark.sql.execution.command.ExternalCommandExecutorimport org.apache.spark.sql.execution.datasources.{DataSource, LogicalRelation}import org.apache.spark.sql.functions.litimport org.apache.spark.sql.internal._import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATIONimport org.apache.spark.sql.sources.BaseRelationimport org.apache.spark.sql.streaming._import org.apache.spark.sql.types.{DataType, StructType}import org.apache.spark.sql.util.ExecutionListenerManagerimport org.apache.spark.util.{CallSite, Utils}/** * The entry point to programming Spark with the Dataset and DataFrame API. * * In environments that this has been created upfront (e.g. 
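
// --- Usage sketch (not part of the original file) ---------------------------
// A minimal, hedged example of the public APIs defined above: Builder,
// createDataset, parameterized sql (named arguments since 3.4.0, positional
// arguments since 3.5.0), and range. The object and class names
// (`SparkSessionUsageSketch`, `Person`) and the app name are illustrative
// assumptions, not part of the Spark API.
import org.apache.spark.sql.SparkSession

object SparkSessionUsageSketch {
  case class Person(name: String, age: Long)

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("SparkSession usage sketch")
      .getOrCreate()
    import spark.implicits._

    // createDataset from a local Seq of Products, using the implicit encoder.
    val people = spark.createDataset(Seq(Person("Michael", 29), Person("Andy", 30)))
    people.createOrReplaceTempView("people")

    // Named-parameter SQL: map values are converted to literal expressions.
    spark.sql("SELECT name FROM people WHERE age > :minAge", Map("minAge" -> 25)).show()

    // Positional-parameter SQL: array elements bind to `?` markers in order.
    spark.sql("SELECT name FROM people WHERE age > ?", Array(25)).show()

    // range produces a Dataset[java.lang.Long] with a single `id` column.
    spark.range(0, 10, 2).show()

    spark.stop()
  }
}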
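
// --- Session isolation sketch (not part of the original file) ---------------
// Hedged illustration of newSession(): the child session shares the
// SparkContext and cached data with its parent but has its own SQL conf and
// temporary views. The view name `ids` is an illustrative assumption.
object SessionIsolationSketch {
  def demo(spark: SparkSession): Unit = {
    val child = spark.newSession()

    // SQL configuration changes stay local to the session that sets them.
    child.conf.set("spark.sql.shuffle.partitions", "8")

    // A temp view registered in the parent is not visible from the child.
    spark.range(5).createOrReplaceTempView("ids")
    assert(spark.catalog.tableExists("ids"))
    assert(!child.catalog.tableExists("ids"))
  }
}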
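
// --- getOrCreate() reuse sketch (not part of the original file) -------------
// Hedged illustration of the Builder semantics described above: a second
// getOrCreate() returns the existing session, and applyModifiableSettings
// applies only modifiable (runtime) SQL options from the new builder, while
// static options such as spark.sql.warehouse.dir are ignored with a warning.
// The app name and warehouse path are illustrative assumptions.
object GetOrCreateSketch {
  def demo(): Unit = {
    val first = SparkSession.builder().master("local[*]").appName("first").getOrCreate()
    val second = SparkSession.builder()
      .config("spark.sql.shuffle.partitions", "16")     // runtime option: applied
      .config("spark.sql.warehouse.dir", "/tmp/other")  // static option: ignored
      .getOrCreate()

    // The same session instance is returned, with the runtime option applied.
    assert(first eq second)
    assert(first.conf.get("spark.sql.shuffle.partitions") == "16")
  }
}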
REPL, notebooks), use the builder * to get an existing session: * * {{{ * SparkSession.builder().getOrCreate() * }}} * * The builder can also be used to create a new session: * * {{{ * SparkSession.builder * .master("local") * .appName("Word Count") * .config("spark.some.config.option", "some-value") * .getOrCreate() * }}} * * @param sparkContext The Spark context associated with this Spark session. * @param existingSharedState If supplied, use the existing shared state * instead of creating a new one. * @param parentSessionState If supplied, inherit all session state (i.e. temporary * views, SQL config, UDFs etc) from parent. */@Stableclass SparkSession private( @transient val sparkContext: SparkContext, @transient private val existingSharedState: Option[SharedState], @transient private val parentSessionState: Option[SessionState], @transient private[sql] val extensions: SparkSessionExtensions, @transient private[sql] val initialSessionOptions: Map[String, String]) extends Serializable with Closeable with Logging { self => // The call site where this SparkSession was constructed. private val creationSite: CallSite = Utils.getCallSite() /** * Constructor used in Pyspark. Contains explicit application of Spark Session Extensions * which otherwise only occurs during getOrCreate. We cannot add this to the default constructor * since that would cause every new session to reinvoke Spark Session Extensions on the currently * running extensions. */ private[sql] def this( sc: SparkContext, initialSessionOptions: java.util.HashMap[String, String]) = { this(sc, None, None, SparkSession.applyExtensions(sc, new SparkSessionExtensions), initialSessionOptions.asScala.toMap) } private[sql] def this(sc: SparkContext) = this(sc, new java.util.HashMap[String, String]()) private[sql] val sessionUUID: String = UUID.randomUUID.toString sparkContext.assertNotStopped() // If there is no active SparkSession, uses the default SQL conf. Otherwise, use the session's. SQLConf.setSQLConfGetter(() => { SparkSession.getActiveSession.filterNot(_.sparkContext.isStopped).map(_.sessionState.conf) .getOrElse(SQLConf.getFallbackConf) }) /** * The version of Spark on which this application is running. * * @since 2.0.0 */ def version: String = SPARK_VERSION /* ----------------------- * | Session-related state | * ----------------------- */ /** * State shared across sessions, including the `SparkContext`, cached data, listener, * and a catalog that interacts with external systems. * * This is internal to Spark and there is no guarantee on interface stability. * * @since 2.2.0 */ @Unstable @transient lazy val sharedState: SharedState = { existingSharedState.getOrElse(new SharedState(sparkContext, initialSessionOptions)) } /** * State isolated across sessions, including SQL configurations, temporary tables, registered * functions, and everything else that accepts a [[org.apache.spark.sql.internal.SQLConf]]. * If `parentSessionState` is not null, the `SessionState` will be a copy of the parent. * * This is internal to Spark and there is no guarantee on interface stability. * * @since 2.2.0 */ @Unstable @transient lazy val sessionState: SessionState = { parentSessionState .map(_.clone(this)) .getOrElse { val state = SparkSession.instantiateSessionState( SparkSession.sessionStateClassName(sharedState.conf), self) state } } /** * A wrapped version of this session in the form of a [[SQLContext]], for backward compatibility. 
* * @since 2.0.0 */ @transient val sqlContext: SQLContext = new SQLContext(this) /** * Runtime configuration interface for Spark. * * This is the interface through which the user can get and set all Spark and Hadoop * configurations that are relevant to Spark SQL. When getting the value of a config, * this defaults to the value set in the underlying `SparkContext`, if any. * * @since 2.0.0 */ @transient lazy val conf: RuntimeConfig = new RuntimeConfig(sessionState.conf) /** * An interface to register custom [[org.apache.spark.sql.util.QueryExecutionListener]]s * that listen for execution metrics. * * @since 2.0.0 */ def listenerManager: ExecutionListenerManager = sessionState.listenerManager /** * :: Experimental :: * A collection of methods that are considered experimental, but can be used to hook into * the query planner for advanced functionality. * * @since 2.0.0 */ @Experimental @Unstable def experimental: ExperimentalMethods = sessionState.experimentalMethods /** * A collection of methods for registering user-defined functions (UDF). * * The following example registers a Scala closure as UDF: * {{{ * sparkSession.udf.register("myUDF", (arg1: Int, arg2: String) => arg2 + arg1) * }}} * * The following example registers a UDF in Java: * {{{ * sparkSession.udf().register("myUDF", * (Integer arg1, String arg2) -> arg2 + arg1, * DataTypes.StringType); * }}} * * @note The user-defined functions must be deterministic. Due to optimization, * duplicate invocations may be eliminated or the function may even be invoked more times than * it is present in the query. * * @since 2.0.0 */ def udf: UDFRegistration = sessionState.udfRegistration def udtf: UDTFRegistration = sessionState.udtfRegistration /** * Returns a `StreamingQueryManager` that allows managing all the * `StreamingQuery`s active on `this`. * * @since 2.0.0 */ @Unstable def streams: StreamingQueryManager = sessionState.streamingQueryManager /** * Start a new session with isolated SQL configurations, temporary tables, registered * functions are isolated, but sharing the underlying `SparkContext` and cached data. * * @note Other than the `SparkContext`, all shared state is initialized lazily. * This method will force the initialization of the shared state to ensure that parent * and child sessions are set up with the same shared state. If the underlying catalog * implementation is Hive, this will initialize the metastore, which may take some time. * * @since 2.0.0 */ def newSession(): SparkSession = { new SparkSession( sparkContext, Some(sharedState), parentSessionState = None, extensions, initialSessionOptions) } /** * Create an identical copy of this `SparkSession`, sharing the underlying `SparkContext` * and shared state. All the state of this session (i.e. SQL configurations, temporary tables, * registered functions) is copied over, and the cloned session is set up with the same shared * state as this session. The cloned session is independent of this session, that is, any * non-global change in either session is not reflected in the other. * * @note Other than the `SparkContext`, all shared state is initialized lazily. * This method will force the initialization of the shared state to ensure that parent * and child sessions are set up with the same shared state. If the underlying catalog * implementation is Hive, this will initialize the metastore, which may take some time. 
*/ private[sql] def cloneSession(): SparkSession = { val result = new SparkSession( sparkContext, Some(sharedState), Some(sessionState), extensions, Map.empty) result.sessionState // force copy of SessionState result } /* --------------------------------- * | Methods for creating DataFrames | * --------------------------------- */ /** * Returns a `DataFrame` with no rows or columns. * * @since 2.0.0 */ @transient lazy val emptyDataFrame: DataFrame = Dataset.ofRows(self, LocalRelation()) /** * Creates a new [[Dataset]] of type T containing zero elements. * * @since 2.0.0 */ def emptyDataset[T: Encoder]: Dataset[T] = { val encoder = implicitly[Encoder[T]] new Dataset(self, LocalRelation(encoder.schema), encoder) } /** * Creates a `DataFrame` from an RDD of Product (e.g. case classes, tuples). * * @since 2.0.0 */ def createDataFrame[A <: Product : TypeTag](rdd: RDD[A]): DataFrame = withActive { val encoder = Encoders.product[A] Dataset.ofRows(self, ExternalRDD(rdd, self)(encoder)) } /** * Creates a `DataFrame` from a local Seq of Product. * * @since 2.0.0 */ def createDataFrame[A <: Product : TypeTag](data: Seq[A]): DataFrame = withActive { val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType] val attributeSeq = toAttributes(schema) Dataset.ofRows(self, LocalRelation.fromProduct(attributeSeq, data)) } /** * :: DeveloperApi :: * Creates a `DataFrame` from an `RDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * Example: * {{{ * import org.apache.spark.sql._ * import org.apache.spark.sql.types._ * val sparkSession = new org.apache.spark.sql.SparkSession(sc) * * val schema = * StructType( * StructField("name", StringType, false) :: * StructField("age", IntegerType, true) :: Nil) * * val people = * sc.textFile("examples/src/main/resources/people.txt").map( * _.split(",")).map(p => Row(p(0), p(1).trim.toInt)) * val dataFrame = sparkSession.createDataFrame(people, schema) * dataFrame.printSchema * // root * // |-- name: string (nullable = false) * // |-- age: integer (nullable = true) * * dataFrame.createOrReplaceTempView("people") * sparkSession.sql("select name from people").collect.foreach(println) * }}} * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rowRDD: RDD[Row], schema: StructType): DataFrame = withActive { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] // TODO: use MutableProjection when rowRDD is another DataFrame and the applied // schema differs from the existing schema on any field data type. val encoder = ExpressionEncoder(replaced) val toRow = encoder.createSerializer() val catalystRows = rowRDD.map(toRow) internalCreateDataFrame(catalystRows.setName(rowRDD.name), schema) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `JavaRDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rowRDD: JavaRDD[Row], schema: StructType): DataFrame = { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] createDataFrame(rowRDD.rdd, replaced) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `java.util.List` containing [[Row]]s using the given schema. 
* It is important to make sure that the structure of every [[Row]] of the provided List matches * the provided schema. Otherwise, there will be runtime exception. * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rows: java.util.List[Row], schema: StructType): DataFrame = withActive { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] Dataset.ofRows(self, LocalRelation.fromExternalRows(toAttributes(replaced), rows.asScala.toSeq)) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * * @since 2.0.0 */ def createDataFrame(rdd: RDD[_], beanClass: Class[_]): DataFrame = withActive { val attributeSeq: Seq[AttributeReference] = getSchema(beanClass) val className = beanClass.getName val rowRdd = rdd.mapPartitions { iter => // BeanInfo is not serializable so we must rediscover it remotely for each partition. SQLContext.beansToRows(iter, Utils.classForName(className), attributeSeq) } Dataset.ofRows(self, LogicalRDD(attributeSeq, rowRdd.setName(rdd.name))(self)) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * * @since 2.0.0 */ def createDataFrame(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame = { createDataFrame(rdd.rdd, beanClass) } /** * Applies a schema to a List of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * @since 1.6.0 */ def createDataFrame(data: java.util.List[_], beanClass: Class[_]): DataFrame = withActive { val attrSeq = getSchema(beanClass) val rows = SQLContext.beansToRows(data.asScala.iterator, beanClass, attrSeq) Dataset.ofRows(self, LocalRelation(attrSeq, rows.toSeq)) } /** * Convert a `BaseRelation` created for external data sources into a `DataFrame`. * * @since 2.0.0 */ def baseRelationToDataFrame(baseRelation: BaseRelation): DataFrame = { Dataset.ofRows(self, LogicalRelation(baseRelation)) } /* ------------------------------- * | Methods for creating DataSets | * ------------------------------- */ /** * Creates a [[Dataset]] from a local Seq of data of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * == Example == * * {{{ * * import spark.implicits._ * case class Person(name: String, age: Long) * val data = Seq(Person("Michael", 29), Person("Andy", 30), Person("Justin", 19)) * val ds = spark.createDataset(data) * * ds.show() * // +-------+---+ * // | name|age| * // +-------+---+ * // |Michael| 29| * // | Andy| 30| * // | Justin| 19| * // +-------+---+ * }}} * * @since 2.0.0 */ def createDataset[T : Encoder](data: Seq[T]): Dataset[T] = { val enc = encoderFor[T] val toRow = enc.createSerializer() val attributes = toAttributes(enc.schema) val encoded = data.map(d => toRow(d).copy()) val plan = new LocalRelation(attributes, encoded) Dataset[T](self, plan) } /** * Creates a [[Dataset]] from an RDD of a given type. 
This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * @since 2.0.0 */ def createDataset[T : Encoder](data: RDD[T]): Dataset[T] = { Dataset[T](self, ExternalRDD(data, self)) } /** * Creates a [[Dataset]] from a `java.util.List` of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * == Java Example == * * {{{ * List<String> data = Arrays.asList("hello", "world"); * Dataset<String> ds = spark.createDataset(data, Encoders.STRING()); * }}} * * @since 2.0.0 */ def createDataset[T : Encoder](data: java.util.List[T]): Dataset[T] = { createDataset(data.asScala.toSeq) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from 0 to `end` (exclusive) with step value 1. * * @since 2.0.0 */ def range(end: Long): Dataset[java.lang.Long] = range(0, end) /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with step value 1. * * @since 2.0.0 */ def range(start: Long, end: Long): Dataset[java.lang.Long] = { range(start, end, step = 1, numPartitions = leafNodeDefaultParallelism) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with a step value. * * @since 2.0.0 */ def range(start: Long, end: Long, step: Long): Dataset[java.lang.Long] = { range(start, end, step, numPartitions = leafNodeDefaultParallelism) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with a step value, with partition number * specified. * * @since 2.0.0 */ def range(start: Long, end: Long, step: Long, numPartitions: Int): Dataset[java.lang.Long] = { new Dataset(self, Range(start, end, step, numPartitions), Encoders.LONG) } /** * Creates a `DataFrame` from an `RDD[InternalRow]`. */ private[sql] def internalCreateDataFrame( catalystRows: RDD[InternalRow], schema: StructType, isStreaming: Boolean = false): DataFrame = { // TODO: use MutableProjection when rowRDD is another DataFrame and the applied // schema differs from the existing schema on any field data type. val logicalPlan = LogicalRDD( toAttributes(schema), catalystRows, isStreaming = isStreaming)(self) Dataset.ofRows(self, logicalPlan) } /* ------------------------- * | Catalog-related methods | * ------------------------- */ /** * Interface through which the user may create, drop, alter or query underlying * databases, tables, functions etc. * * @since 2.0.0 */ @transient lazy val catalog: Catalog = new CatalogImpl(self) /** * Returns the specified table/view as a `DataFrame`. If it's a table, it must support batch * reading and the returned DataFrame is the batch scan query plan of this table. If it's a view, * the returned DataFrame is simply the query plan of the view, which can either be a batch or * streaming query plan. * * @param tableName is either a qualified or unqualified name that designates a table or view. * If a database is specified, it identifies the table/view from the database. 
* Otherwise, it first attempts to find a temporary view with the given name * and then match the table/view from the current database. * Note that, the global temporary view database is also valid here. * @since 2.0.0 */ def table(tableName: String): DataFrame = { read.table(tableName) } private[sql] def table(tableIdent: TableIdentifier): DataFrame = { Dataset.ofRows(self, UnresolvedRelation(tableIdent)) } /* ----------------- * | Everything else | * ----------------- */ /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. * @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql(sqlText: String, args: Array[_], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { PosParameterizedQuery(parsedPlan, args.map(lit(_).expr)) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. * @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.5.0 */ @Experimental def sql(sqlText: String, args: Array[_]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. 
* @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql( sqlText: String, args: Map[String, Any], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { NameParameterizedQuery(parsedPlan, args.mapValues(lit(_).expr).toMap) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: Map[String, Any]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: java.util.Map[String, Any]): DataFrame = { sql(sqlText, args.asScala.toMap) } /** * Executes a SQL query using Spark, returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @since 2.0.0 */ def sql(sqlText: String): DataFrame = sql(sqlText, Map.empty[String, Any]) /** * Execute an arbitrary string command inside an external execution engine rather than Spark. * This could be useful when user wants to execute some commands out of Spark. For * example, executing custom DDL/DML command for JDBC, creating index for ElasticSearch, * creating cores for Solr and so on. * * The command will be eagerly executed after this method is called and the returned * DataFrame will contain the output of the command(if any). * * @param runner The class name of the runner that implements `ExternalCommandRunner`. * @param command The target command to be executed * @param options The options for the runner. 
* * @since 3.0.0 */ @Unstable def executeCommand(runner: String, command: String, options: Map[String, String]): DataFrame = { DataSource.lookupDataSource(runner, sessionState.conf) match { case source if classOf[ExternalCommandRunner].isAssignableFrom(source) => Dataset.ofRows(self, ExternalCommandExecutor( source.newInstance().asInstanceOf[ExternalCommandRunner], command, options)) case _ => throw QueryCompilationErrors.commandExecutionInRunnerUnsupportedError(runner) } } /** * Returns a [[DataFrameReader]] that can be used to read non-streaming data in as a * `DataFrame`. * {{{ * sparkSession.read.parquet("/path/to/file.parquet") * sparkSession.read.schema(schema).json("/path/to/file.json") * }}} * * @since 2.0.0 */ def read: DataFrameReader = new DataFrameReader(self) /** * Returns a `DataStreamReader` that can be used to read streaming data in as a `DataFrame`. * {{{ * sparkSession.readStream.parquet("/path/to/directory/of/parquet/files") * sparkSession.readStream.schema(schema).json("/path/to/directory/of/json/files") * }}} * * @since 2.0.0 */ def readStream: DataStreamReader = new DataStreamReader(self) /** * Executes some code block and prints to stdout the time taken to execute the block. This is * available in Scala only and is used primarily for interactive testing and debugging. * * @since 2.1.0 */ def time[T](f: => T): T = { val start = System.nanoTime() val ret = f val end = System.nanoTime() // scalastyle:off println println(s"Time taken: ${NANOSECONDS.toMillis(end - start)} ms") // scalastyle:on println ret } // scalastyle:off // Disable style checker so "implicits" object can start with lowercase i /** * (Scala-specific) Implicit methods available in Scala for converting * common Scala objects into `DataFrame`s. * * {{{ * val sparkSession = SparkSession.builder.getOrCreate() * import sparkSession.implicits._ * }}} * * @since 2.0.0 */ object implicits extends SQLImplicits with Serializable { protected override def _sqlContext: SQLContext = SparkSession.this.sqlContext } // scalastyle:on /** * Stop the underlying `SparkContext`. * * @since 2.0.0 */ def stop(): Unit = { sparkContext.stop() } /** * Synonym for `stop()`. * * @since 2.1.0 */ override def close(): Unit = stop() /** * Parses the data type in our internal string representation. The data type string should * have the same format as the one generated by `toString` in scala. * It is only used by PySpark. */ protected[sql] def parseDataType(dataTypeString: String): DataType = { DataType.fromJson(dataTypeString) } /** * Apply a schema defined by the schemaString to an RDD. It is only used by PySpark. */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schemaString: String): DataFrame = { val schema = DataType.fromJson(schemaString).asInstanceOf[StructType] applySchemaToPythonRDD(rdd, schema) } /** * Apply `schema` to an RDD. * * @note Used by PySpark only */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schema: StructType): DataFrame = { val rowRdd = rdd.mapPartitions { iter => val fromJava = python.EvaluatePython.makeFromJava(schema) iter.map(r => fromJava(r).asInstanceOf[InternalRow]) } internalCreateDataFrame(rowRdd, schema) } /** * Returns a Catalyst Schema for the given java bean class. 
*/ private def getSchema(beanClass: Class[_]): Seq[AttributeReference] = { val (dataType, _) = JavaTypeInference.inferDataType(beanClass) dataType.asInstanceOf[StructType].fields.map { f => AttributeReference(f.name, f.dataType, f.nullable)() } } /** * Execute a block of code with this session set as the active session, and restore the * previous session on completion. */ private[sql] def withActive[T](block: => T): T = { // Use the active session thread local directly to make sure we get the session that is actually // set and not the default session. This prevents promoting the default session to the // active session once we are done. val old = SparkSession.activeThreadSession.get() SparkSession.setActiveSession(this) try block finally { SparkSession.setActiveSession(old) } } private[sql] def leafNodeDefaultParallelism: Int = { conf.get(SQLConf.LEAF_NODE_DEFAULT_PARALLELISM).getOrElse(sparkContext.defaultParallelism) }}@Stableobject SparkSession extends Logging { /** * Builder for [[SparkSession]]. */ @Stable class Builder extends Logging { private[this] val options = new scala.collection.mutable.HashMap[String, String] private[this] val extensions = new SparkSessionExtensions private[this] var userSuppliedContext: Option[SparkContext] = None private[spark] def sparkContext(sparkContext: SparkContext): Builder = synchronized { userSuppliedContext = Option(sparkContext) this } /** * Sets a name for the application, which will be shown in the Spark web UI. * If no application name is set, a randomly generated name will be used. * * @since 2.0.0 */ def appName(name: String): Builder = config("spark.app.name", name) /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: String): Builder = synchronized { options += key -> value this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Long): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Double): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Boolean): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: Map[String, Any]): Builder = synchronized { map.foreach { kv: (String, Any) => { options += kv._1 -> kv._2.toString } } this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: java.util.Map[String, Any]): Builder = synchronized { config(map.asScala.toMap) } /** * Sets a list of config options based on the given `SparkConf`.
* * @since 2.0.0 */ def config(conf: SparkConf): Builder = synchronized { conf.getAll.foreach { case (k, v) => options += k -> v } this } /** * Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]" to * run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster. * * @since 2.0.0 */ def master(master: String): Builder = config("spark.master", master) /** * Enables Hive support, including connectivity to a persistent Hive metastore, support for * Hive serdes, and Hive user-defined functions. * * @since 2.0.0 */ def enableHiveSupport(): Builder = synchronized { if (hiveClassesArePresent) { config(CATALOG_IMPLEMENTATION.key, "hive") } else { throw new IllegalArgumentException( "Unable to instantiate SparkSession with Hive support because " + "Hive classes are not found.") } } /** * Inject extensions into the [[SparkSession]]. This allows a user to add Analyzer rules, * Optimizer rules, Planning Strategies or a customized parser. * * @since 2.2.0 */ def withExtensions(f: SparkSessionExtensions => Unit): Builder = synchronized { f(extensions) this } /** * Gets an existing [[SparkSession]] or, if there is no existing one, creates a new * one based on the options set in this builder. * * This method first checks whether there is a valid thread-local SparkSession, * and if yes, return that one. It then checks whether there is a valid global * default SparkSession, and if yes, return that one. If no valid global default * SparkSession exists, the method creates a new SparkSession and assigns the * newly created SparkSession as the global default. * * In case an existing SparkSession is returned, the non-static config options specified in * this builder will be applied to the existing SparkSession. * * @since 2.0.0 */ def getOrCreate(): SparkSession = synchronized { val sparkConf = new SparkConf() options.foreach { case (k, v) => sparkConf.set(k, v) } if (!sparkConf.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { assertOnDriver() } // Get the session from current thread's active session. var session = activeThreadSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // Global synchronization so we will only set the default session once. SparkSession.synchronized { // If the current thread does not have an active session, get it from the global session. session = defaultSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // No active nor global default session. Create a new one. val sparkContext = userSuppliedContext.getOrElse { // set a random app name if not given. if (!sparkConf.contains("spark.app.name")) { sparkConf.setAppName(java.util.UUID.randomUUID().toString) } SparkContext.getOrCreate(sparkConf) // Do not update `SparkConf` for existing `SparkContext`, as it's shared by all sessions. } loadExtensions(extensions) applyExtensions(sparkContext, extensions) session = new SparkSession(sparkContext, None, None, extensions, options.toMap) setDefaultSession(session) setActiveSession(session) registerContextListener(sparkContext) } return session } } /** * Creates a [[SparkSession.Builder]] for constructing a [[SparkSession]]. 
* * @since 2.0.0 */ def builder(): Builder = new Builder /** * Changes the SparkSession that will be returned in this thread and its children when * SparkSession.getOrCreate() is called. This can be used to ensure that a given thread receives * a SparkSession with an isolated session, instead of the global (first created) context. * * @since 2.0.0 */ def setActiveSession(session: SparkSession): Unit = { activeThreadSession.set(session) } /** * Clears the active SparkSession for the current thread. Subsequent calls to getOrCreate will * return the first created context instead of a thread-local override. * * @since 2.0.0 */ def clearActiveSession(): Unit = { activeThreadSession.remove() } /** * Sets the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def setDefaultSession(session: SparkSession): Unit = { defaultSession.set(session) } /** * Clears the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def clearDefaultSession(): Unit = { defaultSession.set(null) } /** * Returns the active SparkSession for the current thread, returned by the builder. * * @note Returns None when this function is called on executors * * @since 2.2.0 */ def getActiveSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(activeThreadSession.get) } } /** * Returns the default SparkSession that is returned by the builder. * * @note Returns None when this function is called on executors * * @since 2.2.0 */ def getDefaultSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(defaultSession.get) } } /** * Returns the currently active SparkSession, otherwise the default one. If there is no default * SparkSession, throws an exception. * * @since 2.4.0 */ def active: SparkSession = { getActiveSession.getOrElse(getDefaultSession.getOrElse( throw new IllegalStateException("No active or default Spark session found"))) } /** * Apply modifiable settings to an existing [[SparkSession]]. This method is used * in both Scala and Python, so it is kept under the [[SparkSession]] object. */ private[sql] def applyModifiableSettings( session: SparkSession, options: java.util.HashMap[String, String]): Unit = { // Lazy val to avoid an unnecessary session state initialization lazy val conf = session.sessionState.conf val dedupOptions = if (options.isEmpty) Map.empty[String, String] else ( options.asScala.toSet -- conf.getAllConfs.toSet).toMap val (staticConfs, otherConfs) = dedupOptions.partition(kv => SQLConf.isStaticConfigKey(kv._1)) otherConfs.foreach { case (k, v) => conf.setConfString(k, v) } // Note that other runtime SQL options, for example options for third-party datasources, // can be marked as ignored configurations here. val maybeIgnoredConfs = otherConfs.filterNot { case (k, _) => conf.isModifiable(k) } if (staticConfs.nonEmpty || maybeIgnoredConfs.nonEmpty) { logWarning( "Using an existing Spark session; only runtime SQL configurations will take effect.") } if (staticConfs.nonEmpty) { logDebug("Ignored static SQL configurations:\n " + conf.redactOptions(staticConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } if (maybeIgnoredConfs.nonEmpty) { // Only print out non-static and non-runtime SQL configurations. // Note that this might show core configurations or source-specific // options defined in a third-party datasource.
logDebug("Configurations that might not take effect:\n " + conf.redactOptions( maybeIgnoredConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } } /** * Returns a cloned SparkSession with all specified configurations disabled, or * the original SparkSession if all configurations are already disabled. */ private[sql] def getOrCloneSessionWithConfigsOff( session: SparkSession, configurations: Seq[ConfigEntry[Boolean]]): SparkSession = { val configsEnabled = configurations.filter(session.conf.get[Boolean]) if (configsEnabled.isEmpty) { session } else { val newSession = session.cloneSession() configsEnabled.foreach(conf => { newSession.conf.set(conf, false) }) newSession } } //////////////////////////////////////////////////////////////////////////////////////// // Private methods from now on //////////////////////////////////////////////////////////////////////////////////////// private val listenerRegistered: AtomicBoolean = new AtomicBoolean(false) /** Register the AppEnd listener onto the Context */ private def registerContextListener(sparkContext: SparkContext): Unit = { if (!listenerRegistered.get()) { sparkContext.addSparkListener(new SparkListener { override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = { defaultSession.set(null) listenerRegistered.set(false) } }) listenerRegistered.set(true) } } /** The active SparkSession for the current thread. */ private val activeThreadSession = new InheritableThreadLocal[SparkSession] /** Reference to the root SparkSession. */ private val defaultSession = new AtomicReference[SparkSession] private val HIVE_SESSION_STATE_BUILDER_CLASS_NAME = "org.apache.spark.sql.hive.HiveSessionStateBuilder" private def sessionStateClassName(conf: SparkConf): String = { conf.get(CATALOG_IMPLEMENTATION) match { case "hive" => HIVE_SESSION_STATE_BUILDER_CLASS_NAME case "in-memory" => classOf[SessionStateBuilder].getCanonicalName } } private def assertOnDriver(): Unit = { if (TaskContext.get != null) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkSession should only be created and accessed on the driver.") } } /** * Helper method to create an instance of `SessionState` based on `className` from conf. * The result is either `SessionState` or a Hive based `SessionState`. */ private def instantiateSessionState( className: String, sparkSession: SparkSession): SessionState = { try { // invoke new [Hive]SessionStateBuilder( // SparkSession, // Option[SessionState]) val clazz = Utils.classForName(className) val ctor = clazz.getConstructors.head ctor.newInstance(sparkSession, None).asInstanceOf[BaseSessionStateBuilder].build() } catch { case NonFatal(e) => throw new IllegalArgumentException(s"Error while instantiating '$className':", e) } } /** * @return true if Hive classes can be loaded, otherwise false. */ private[spark] def hiveClassesArePresent: Boolean = { try { Utils.classForName(HIVE_SESSION_STATE_BUILDER_CLASS_NAME) Utils.classForName("org.apache.hadoop.hive.conf.HiveConf") true } catch { case _: ClassNotFoundException | _: NoClassDefFoundError => false } } private[spark] def cleanupAnyExistingSession(): Unit = { val session = getActiveSession.orElse(getDefaultSession) if (session.isDefined) { logWarning( s"""An existing Spark session exists as the active or default session. |This probably means another suite leaked it. Attempting to stop it before continuing. 
|This existing Spark session was created at: | |${session.get.creationSite.longForm} | """.stripMargin) session.get.stop() SparkSession.clearActiveSession() SparkSession.clearDefaultSession() } } /** * Initialize extensions specified in [[StaticSQLConf]]. The classes will be applied to the * extensions passed into this function. */ private def applyExtensions( sparkContext: SparkContext, extensions: SparkSessionExtensions): SparkSessionExtensions = { val extensionConfClassNames = sparkContext.getConf.get(StaticSQLConf.SPARK_SESSION_EXTENSIONS) .getOrElse(Seq.empty) extensionConfClassNames.foreach { extensionConfClassName => try { val extensionConfClass = Utils.classForName(extensionConfClassName) val extensionConf = extensionConfClass.getConstructor().newInstance() .asInstanceOf[SparkSessionExtensions => Unit] extensionConf(extensions) } catch { // Ignore the error if we cannot find the class or when the class has the wrong type. case e@(_: ClassCastException | _: ClassNotFoundException | _: NoClassDefFoundError) => logWarning(s"Cannot use $extensionConfClassName to configure session extensions.", e) } } extensions } /** * Load extensions from [[ServiceLoader]] and use them */ private def loadExtensions(extensions: SparkSessionExtensions): Unit = { val loader = ServiceLoader.load(classOf[SparkSessionExtensionsProvider], Utils.getContextOrSparkClassLoader) val loadedExts = loader.iterator() while (loadedExts.hasNext) { try { val ext = loadedExts.next() ext(extensions) } catch { case e: Throwable => logWarning("Failed to load session extension", e) } } }}
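A minimal sketch of the Builder API above, assuming local execution; the application name, master URL, and configuration keys/values are arbitrary placeholders.

import org.apache.spark.sql.SparkSession

object BuilderSketch {
  def main(args: Array[String]): Unit = {
    // Options set on the builder land both in SparkConf and in the session's own
    // initial options (see the config overloads above).
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("builder-sketch")
      .config("spark.sql.shuffle.partitions", 4L)   // Long overload
      .config(Map("spark.ui.enabled" -> false))     // map overload, @since 3.4.0
      .getOrCreate()

    // The catalog implementation stays "in-memory" unless enableHiveSupport() was used.
    println(spark.conf.get("spark.sql.catalogImplementation"))
    spark.stop()
  }
}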
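A small sketch of the getOrCreate contract described above, assuming `spark.sql.shuffle.partitions` as a stand-in for any modifiable SQL configuration and `spark.sql.warehouse.dir` for a static one: an existing session is reused, and only the runtime SQL configurations carried by the builder are applied to it.

import org.apache.spark.sql.SparkSession

object GetOrCreateSketch {
  def main(args: Array[String]): Unit = {
    val first = SparkSession.builder().master("local").appName("first").getOrCreate()

    // A second builder does not create a new session; it returns the existing one and
    // applies only the modifiable (runtime) SQL configurations it carries.
    val second = SparkSession.builder()
      .config("spark.sql.shuffle.partitions", "8")   // runtime SQL conf: applied
      .config("spark.sql.warehouse.dir", "/tmp/wh")  // static SQL conf: ignored with a warning
      .getOrCreate()

    assert(second eq first)
    assert(second.conf.get("spark.sql.shuffle.partitions") == "8")
    first.stop()
  }
}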
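A minimal sketch of the named-parameter `sql` overload, assuming a throwaway `people` temporary view and the `:name` marker syntax for named parameters; the API is marked @Experimental, so details may change.

import java.time.LocalDate

import org.apache.spark.sql.SparkSession

object NamedParameterSqlSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local").appName("sql-args").getOrCreate()
    import spark.implicits._

    Seq(("Steven", LocalDate.of(2023, 4, 2)), ("Ann", LocalDate.of(1999, 12, 31)))
      .toDF("name", "birthdate")
      .createOrReplaceTempView("people")

    // Each :key in the statement is bound from the map; plain values are converted to
    // literal expressions, while Columns of literals are taken as is.
    spark.sql(
      "SELECT name FROM people WHERE birthdate >= :cutoff",
      Map("cutoff" -> LocalDate.of(2000, 1, 1))).show()

    spark.stop()
  }
}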
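A short sketch combining `read`, `readStream`, and `time`, assuming placeholder paths and a caller that already holds a session.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{StringType, StructField, StructType}

object ReaderSketch {
  // Assumes `spark` was obtained elsewhere, e.g. via SparkSession.builder().getOrCreate().
  def run(spark: SparkSession): Unit = {
    val schema = StructType(Seq(StructField("name", StringType, nullable = true)))

    // Batch read, wrapped in the interactive timing helper defined above.
    val people = spark.time {
      spark.read.schema(schema).json("/path/to/people.json") // placeholder path
    }
    people.printSchema()

    // Streaming read over a directory of files; the resulting DataFrame is unbounded.
    val stream = spark.readStream.schema(schema).json("/path/to/json/dir") // placeholder path
    println(s"isStreaming = ${stream.isStreaming}")
  }
}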
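A sketch of the thread-local active-session management shown above (`setActiveSession`, `clearActiveSession`, `getActiveSession`, `active`), assuming `newSession()` from earlier in this class to obtain an isolated child session.

import org.apache.spark.sql.SparkSession

object ActiveSessionSketch {
  def main(args: Array[String]): Unit = {
    val root = SparkSession.builder().master("local").appName("sessions").getOrCreate()

    // A child session shares the SparkContext but has isolated SQL conf and temp views.
    val child = root.newSession()
    SparkSession.setActiveSession(child)

    // Code that resolves the session implicitly now sees `child`.
    assert(SparkSession.active eq child)
    assert(SparkSession.getActiveSession.contains(child))

    // Clearing the thread-local override makes `active` fall back to the default session.
    SparkSession.clearActiveSession()
    assert(SparkSession.active eq root)

    root.stop()
  }
}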
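A sketch of the extension hooks above, assuming a hypothetical `NoOpExtensions` class of the `SparkSessionExtensions => Unit` shape that `applyExtensions` instantiates; the same class name could be supplied through the `spark.sql.extensions` static configuration, while `withExtensions` wires it in programmatically.

import org.apache.spark.sql.{SparkSession, SparkSessionExtensions}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule

// An extensions class of the shape applyExtensions expects: Function1[SparkSessionExtensions, Unit].
class NoOpExtensions extends (SparkSessionExtensions => Unit) {
  override def apply(extensions: SparkSessionExtensions): Unit = {
    extensions.injectOptimizerRule { _ =>
      new Rule[LogicalPlan] {
        override def apply(plan: LogicalPlan): LogicalPlan = plan // no-op rule, for illustration only
      }
    }
  }
}

object ExtensionsSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local")
      .withExtensions(new NoOpExtensions()) // applies the function to the builder's extensions
      .getOrCreate()
    spark.range(5).count()
    spark.stop()
  }
}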
  /**
   * Load extensions from [[ServiceLoader]] and use them
   */
  private def loadExtensions(extensions: SparkSessionExtensions): Unit = {
    val loader = ServiceLoader.load(classOf[SparkSessionExtensionsProvider],
      Utils.getContextOrSparkClassLoader)
    val loadedExts = loader.iterator()

    while (loadedExts.hasNext) {
      try {
        val ext = loadedExts.next()
        ext(extensions)
      } catch {
        case e: Throwable => logWarning("Failed to load session extension", e)
      }
    }
  }
}
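// Illustrative sketch, not part of the original source: `loadExtensions` above discovers
// `SparkSessionExtensionsProvider` implementations through the JDK `ServiceLoader`. A provider
// jar ships a class like the one below together with a resource file
// `META-INF/services/org.apache.spark.sql.SparkSessionExtensionsProvider` listing its class
// name, so the provider is applied to newly created sessions without any extra configuration.
// `MyOptimizerRule` is a hypothetical rule name used only for illustration.
//
//   class MyExtensionsProvider extends SparkSessionExtensionsProvider {
//     override def apply(extensions: SparkSessionExtensions): Unit = {
//       extensions.injectOptimizerRule { session => MyOptimizerRule(session) }
//     }
//   }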
* @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql(sqlText: String, args: Array[_], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { PosParameterizedQuery(parsedPlan, args.map(lit(_).expr)) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. * @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.5.0 */ @Experimental def sql(sqlText: String, args: Array[_]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql( sqlText: String, args: Map[String, Any], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { NameParameterizedQuery(parsedPlan, args.mapValues(lit(_).expr).toMap) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. 
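 *
 * An illustrative sketch of named-parameter binding (the view name `people` and the
 * parameter name `minAge` are hypothetical):
 * {{{
 *   val adults = sparkSession.sql(
 *     "SELECT name FROM people WHERE age >= :minAge",
 *     Map("minAge" -> 18))
 * }}}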
* * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: Map[String, Any]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: java.util.Map[String, Any]): DataFrame = { sql(sqlText, args.asScala.toMap) } /** * Executes a SQL query using Spark, returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @since 2.0.0 */ def sql(sqlText: String): DataFrame = sql(sqlText, Map.empty[String, Any]) /** * Execute an arbitrary string command inside an external execution engine rather than Spark. * This could be useful when user wants to execute some commands out of Spark. For * example, executing custom DDL/DML command for JDBC, creating index for ElasticSearch, * creating cores for Solr and so on. * * The command will be eagerly executed after this method is called and the returned * DataFrame will contain the output of the command(if any). * * @param runner The class name of the runner that implements `ExternalCommandRunner`. * @param command The target command to be executed * @param options The options for the runner. * * @since 3.0.0 */ @Unstable def executeCommand(runner: String, command: String, options: Map[String, String]): DataFrame = { DataSource.lookupDataSource(runner, sessionState.conf) match { case source if classOf[ExternalCommandRunner].isAssignableFrom(source) => Dataset.ofRows(self, ExternalCommandExecutor( source.newInstance().asInstanceOf[ExternalCommandRunner], command, options)) case _ => throw QueryCompilationErrors.commandExecutionInRunnerUnsupportedError(runner) } } /** * Returns a [[DataFrameReader]] that can be used to read non-streaming data in as a * `DataFrame`. * {{{ * sparkSession.read.parquet("/path/to/file.parquet") * sparkSession.read.schema(schema).json("/path/to/file.json") * }}} * * @since 2.0.0 */ def read: DataFrameReader = new DataFrameReader(self) /** * Returns a `DataStreamReader` that can be used to read streaming data in as a `DataFrame`. * {{{ * sparkSession.readStream.parquet("/path/to/directory/of/parquet/files") * sparkSession.readStream.schema(schema).json("/path/to/directory/of/json/files") * }}} * * @since 2.0.0 */ def readStream: DataStreamReader = new DataStreamReader(self) /** * Executes some code block and prints to stdout the time taken to execute the block. This is * available in Scala only and is used primarily for interactive testing and debugging. 
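 *
 * For example (an illustrative sketch; the timing shown is made up):
 * {{{
 *   val rowCount = sparkSession.time {
 *     sparkSession.range(0, 1000000).count()
 *   }
 *   // prints something like: Time taken: 123 ms
 * }}}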
 * * @since 2.1.0 */ def time[T](f: => T): T = { val start = System.nanoTime() val ret = f val end = System.nanoTime() // scalastyle:off println println(s"Time taken: ${NANOSECONDS.toMillis(end - start)} ms") // scalastyle:on println ret } // scalastyle:off // Disable style checker so "implicits" object can start with lowercase i /** * (Scala-specific) Implicit methods available in Scala for converting * common Scala objects into `DataFrame`s. * * {{{ * val sparkSession = SparkSession.builder.getOrCreate() * import sparkSession.implicits._ * }}} * * @since 2.0.0 */ object implicits extends SQLImplicits with Serializable { protected override def _sqlContext: SQLContext = SparkSession.this.sqlContext } // scalastyle:on /** * Stop the underlying `SparkContext`. * * @since 2.0.0 */ def stop(): Unit = { sparkContext.stop() } /** * Synonym for `stop()`. * * @since 2.1.0 */ override def close(): Unit = stop() /** * Parses the data type in our internal string representation. The data type string should * have the same format as the one generated by `toString` in Scala. * It is only used by PySpark. */ protected[sql] def parseDataType(dataTypeString: String): DataType = { DataType.fromJson(dataTypeString) } /** * Apply a schema defined by the schemaString to an RDD. It is only used by PySpark. */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schemaString: String): DataFrame = { val schema = DataType.fromJson(schemaString).asInstanceOf[StructType] applySchemaToPythonRDD(rdd, schema) } /** * Apply `schema` to an RDD. * * @note Used by PySpark only */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schema: StructType): DataFrame = { val rowRdd = rdd.mapPartitions { iter => val fromJava = python.EvaluatePython.makeFromJava(schema) iter.map(r => fromJava(r).asInstanceOf[InternalRow]) } internalCreateDataFrame(rowRdd, schema) } /** * Returns a Catalyst schema for the given Java bean class. */ private def getSchema(beanClass: Class[_]): Seq[AttributeReference] = { val (dataType, _) = JavaTypeInference.inferDataType(beanClass) dataType.asInstanceOf[StructType].fields.map { f => AttributeReference(f.name, f.dataType, f.nullable)() } } /** * Execute a block of code with this session set as the active session, and restore the * previous session on completion. */ private[sql] def withActive[T](block: => T): T = { // Use the active session thread local directly to make sure we get the session that is actually // set and not the default session. This prevents promoting the default session to the // active session once we are done. val old = SparkSession.activeThreadSession.get() SparkSession.setActiveSession(this) try block finally { SparkSession.setActiveSession(old) } } private[sql] def leafNodeDefaultParallelism: Int = { conf.get(SQLConf.LEAF_NODE_DEFAULT_PARALLELISM).getOrElse(sparkContext.defaultParallelism) }} @Stable object SparkSession extends Logging { /** * Builder for [[SparkSession]]. */ @Stable class Builder extends Logging { private[this] val options = new scala.collection.mutable.HashMap[String, String] private[this] val extensions = new SparkSessionExtensions private[this] var userSuppliedContext: Option[SparkContext] = None private[spark] def sparkContext(sparkContext: SparkContext): Builder = synchronized { userSuppliedContext = Option(sparkContext) this } /** * Sets a name for the application, which will be shown in the Spark web UI. * If no application name is set, a randomly generated name will be used. 
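 *
 * For example (an illustrative sketch; the application name is arbitrary):
 * {{{
 *   val spark = SparkSession.builder
 *     .master("local[2]")
 *     .appName("word-count-demo")
 *     .getOrCreate()
 * }}}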
* * @since 2.0.0 */ def appName(name: String): Builder = config("spark.app.name", name) /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: String): Builder = synchronized { options += key -> value this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Long): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Double): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Boolean): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: Map[String, Any]): Builder = synchronized { map.foreach { kv: (String, Any) => { options += kv._1 -> kv._2.toString } } this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: java.util.Map[String, Any]): Builder = synchronized { config(map.asScala.toMap) } /** * Sets a list of config options based on the given `SparkConf`. * * @since 2.0.0 */ def config(conf: SparkConf): Builder = synchronized { conf.getAll.foreach { case (k, v) => options += k -> v } this } /** * Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]" to * run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster. * * @since 2.0.0 */ def master(master: String): Builder = config("spark.master", master) /** * Enables Hive support, including connectivity to a persistent Hive metastore, support for * Hive serdes, and Hive user-defined functions. * * @since 2.0.0 */ def enableHiveSupport(): Builder = synchronized { if (hiveClassesArePresent) { config(CATALOG_IMPLEMENTATION.key, "hive") } else { throw new IllegalArgumentException( "Unable to instantiate SparkSession with Hive support because " + "Hive classes are not found.") } } /** * Inject extensions into the [[SparkSession]]. This allows a user to add Analyzer rules, * Optimizer rules, Planning Strategies or a customized parser. * * @since 2.2.0 */ def withExtensions(f: SparkSessionExtensions => Unit): Builder = synchronized { f(extensions) this } /** * Gets an existing [[SparkSession]] or, if there is no existing one, creates a new * one based on the options set in this builder. * * This method first checks whether there is a valid thread-local SparkSession, * and if yes, return that one. It then checks whether there is a valid global * default SparkSession, and if yes, return that one. If no valid global default * SparkSession exists, the method creates a new SparkSession and assigns the * newly created SparkSession as the global default. 
* * In case an existing SparkSession is returned, the non-static config options specified in * this builder will be applied to the existing SparkSession. * * @since 2.0.0 */ def getOrCreate(): SparkSession = synchronized { val sparkConf = new SparkConf() options.foreach { case (k, v) => sparkConf.set(k, v) } if (!sparkConf.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { assertOnDriver() } // Get the session from current thread's active session. var session = activeThreadSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // Global synchronization so we will only set the default session once. SparkSession.synchronized { // If the current thread does not have an active session, get it from the global session. session = defaultSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // No active nor global default session. Create a new one. val sparkContext = userSuppliedContext.getOrElse { // set a random app name if not given. if (!sparkConf.contains("spark.app.name")) { sparkConf.setAppName(java.util.UUID.randomUUID().toString) } SparkContext.getOrCreate(sparkConf) // Do not update `SparkConf` for existing `SparkContext`, as it's shared by all sessions. } loadExtensions(extensions) applyExtensions(sparkContext, extensions) session = new SparkSession(sparkContext, None, None, extensions, options.toMap) setDefaultSession(session) setActiveSession(session) registerContextListener(sparkContext) } return session } } /** * Creates a [[SparkSession.Builder]] for constructing a [[SparkSession]]. * * @since 2.0.0 */ def builder(): Builder = new Builder /** * Changes the SparkSession that will be returned in this thread and its children when * SparkSession.getOrCreate() is called. This can be used to ensure that a given thread receives * a SparkSession with an isolated session, instead of the global (first created) context. * * @since 2.0.0 */ def setActiveSession(session: SparkSession): Unit = { activeThreadSession.set(session) } /** * Clears the active SparkSession for current thread. Subsequent calls to getOrCreate will * return the first created context instead of a thread-local override. * * @since 2.0.0 */ def clearActiveSession(): Unit = { activeThreadSession.remove() } /** * Sets the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def setDefaultSession(session: SparkSession): Unit = { defaultSession.set(session) } /** * Clears the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def clearDefaultSession(): Unit = { defaultSession.set(null) } /** * Returns the active SparkSession for the current thread, returned by the builder. * * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getActiveSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(activeThreadSession.get) } } /** * Returns the default SparkSession that is returned by the builder. * * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getDefaultSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(defaultSession.get) } } /** * Returns the currently active SparkSession, otherwise the default one. 
If there is no default * SparkSession, throws an exception. * * @since 2.4.0 */ def active: SparkSession = { getActiveSession.getOrElse(getDefaultSession.getOrElse( throw new IllegalStateException("No active or default Spark session found"))) } /** * Apply modifiable settings to an existing [[SparkSession]]. This method is used * both in Scala and Python, so it is kept under the [[SparkSession]] object. */ private[sql] def applyModifiableSettings( session: SparkSession, options: java.util.HashMap[String, String]): Unit = { // Lazy val to avoid an unnecessary session state initialization lazy val conf = session.sessionState.conf val dedupOptions = if (options.isEmpty) Map.empty[String, String] else ( options.asScala.toSet -- conf.getAllConfs.toSet).toMap val (staticConfs, otherConfs) = dedupOptions.partition(kv => SQLConf.isStaticConfigKey(kv._1)) otherConfs.foreach { case (k, v) => conf.setConfString(k, v) } // Note that other runtime SQL options, for example those for third-party data sources, // can be marked as ignored configurations here. val maybeIgnoredConfs = otherConfs.filterNot { case (k, _) => conf.isModifiable(k) } if (staticConfs.nonEmpty || maybeIgnoredConfs.nonEmpty) { logWarning( "Using an existing Spark session; only runtime SQL configurations will take effect.") } if (staticConfs.nonEmpty) { logDebug("Ignored static SQL configurations:\n " + conf.redactOptions(staticConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } if (maybeIgnoredConfs.nonEmpty) { // Only print out non-static and non-runtime SQL configurations. // Note that this might show core configurations or source specific // options defined in third-party data sources. logDebug("Configurations that might not take effect:\n " + conf.redactOptions( maybeIgnoredConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } } /** * Returns a cloned SparkSession with all specified configurations disabled, or * the original SparkSession if all configurations are already disabled. */ private[sql] def getOrCloneSessionWithConfigsOff( session: SparkSession, configurations: Seq[ConfigEntry[Boolean]]): SparkSession = { val configsEnabled = configurations.filter(session.conf.get[Boolean]) if (configsEnabled.isEmpty) { session } else { val newSession = session.cloneSession() configsEnabled.foreach(conf => { newSession.conf.set(conf, false) }) newSession } } //////////////////////////////////////////////////////////////////////////////////////// // Private methods from now on //////////////////////////////////////////////////////////////////////////////////////// private val listenerRegistered: AtomicBoolean = new AtomicBoolean(false) /** Register the AppEnd listener onto the Context */ private def registerContextListener(sparkContext: SparkContext): Unit = { if (!listenerRegistered.get()) { sparkContext.addSparkListener(new SparkListener { override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = { defaultSession.set(null) listenerRegistered.set(false) } }) listenerRegistered.set(true) } } /** The active SparkSession for the current thread. */ private val activeThreadSession = new InheritableThreadLocal[SparkSession] /** Reference to the root SparkSession. 
*/ private val defaultSession = new AtomicReference[SparkSession] private val HIVE_SESSION_STATE_BUILDER_CLASS_NAME = "org.apache.spark.sql.hive.HiveSessionStateBuilder" private def sessionStateClassName(conf: SparkConf): String = { conf.get(CATALOG_IMPLEMENTATION) match { case "hive" => HIVE_SESSION_STATE_BUILDER_CLASS_NAME case "in-memory" => classOf[SessionStateBuilder].getCanonicalName } } private def assertOnDriver(): Unit = { if (TaskContext.get != null) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkSession should only be created and accessed on the driver.") } } /** * Helper method to create an instance of `SessionState` based on `className` from conf. * The result is either `SessionState` or a Hive based `SessionState`. */ private def instantiateSessionState( className: String, sparkSession: SparkSession): SessionState = { try { // invoke new [Hive]SessionStateBuilder( // SparkSession, // Option[SessionState]) val clazz = Utils.classForName(className) val ctor = clazz.getConstructors.head ctor.newInstance(sparkSession, None).asInstanceOf[BaseSessionStateBuilder].build() } catch { case NonFatal(e) => throw new IllegalArgumentException(s"Error while instantiating '$className':", e) } } /** * @return true if Hive classes can be loaded, otherwise false. */ private[spark] def hiveClassesArePresent: Boolean = { try { Utils.classForName(HIVE_SESSION_STATE_BUILDER_CLASS_NAME) Utils.classForName("org.apache.hadoop.hive.conf.HiveConf") true } catch { case _: ClassNotFoundException | _: NoClassDefFoundError => false } } private[spark] def cleanupAnyExistingSession(): Unit = { val session = getActiveSession.orElse(getDefaultSession) if (session.isDefined) { logWarning( s"""An existing Spark session exists as the active or default session. |This probably means another suite leaked it. Attempting to stop it before continuing. |This existing Spark session was created at: | |${session.get.creationSite.longForm} | """.stripMargin) session.get.stop() SparkSession.clearActiveSession() SparkSession.clearDefaultSession() } } /** * Initialize extensions specified in [[StaticSQLConf]]. The classes will be applied to the * extensions passed into this function. */ private def applyExtensions( sparkContext: SparkContext, extensions: SparkSessionExtensions): SparkSessionExtensions = { val extensionConfClassNames = sparkContext.getConf.get(StaticSQLConf.SPARK_SESSION_EXTENSIONS) .getOrElse(Seq.empty) extensionConfClassNames.foreach { extensionConfClassName => try { val extensionConfClass = Utils.classForName(extensionConfClassName) val extensionConf = extensionConfClass.getConstructor().newInstance() .asInstanceOf[SparkSessionExtensions => Unit] extensionConf(extensions) } catch { // Ignore the error if we cannot find the class or when the class has the wrong type. 
 case e@(_: ClassCastException | _: ClassNotFoundException | _: NoClassDefFoundError) => logWarning(s"Cannot use $extensionConfClassName to configure session extensions.", e) } } extensions } /** * Load extensions from [[ServiceLoader]] and use them */ private def loadExtensions(extensions: SparkSessionExtensions): Unit = { val loader = ServiceLoader.load(classOf[SparkSessionExtensionsProvider], Utils.getContextOrSparkClassLoader) val loadedExts = loader.iterator() while (loadedExts.hasNext) { try { val ext = loadedExts.next() ext(extensions) } catch { case e: Throwable => logWarning("Failed to load session extension", e) } } }}
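// ---------------------------------------------------------------------------------------
// Illustrative end-to-end usage sketch (not part of the class above; the application name,
// view name, and literal values below are hypothetical placeholders).
// ---------------------------------------------------------------------------------------
//
//   import org.apache.spark.sql.SparkSession
//
//   val spark = SparkSession.builder
//     .master("local[*]")
//     .appName("sparksession-demo")
//     .getOrCreate()
//
//   // A single-column Dataset[java.lang.Long] named "id", registered as a temp view.
//   spark.range(0, 10).createOrReplaceTempView("ids")
//
//   // Positional parameters ('?', since 3.5.0) and named parameters (':name', since 3.4.0).
//   val below5 = spark.sql("SELECT id FROM ids WHERE id < ?", Array(5))
//   val below3 = spark.sql("SELECT id FROM ids WHERE id < :limit", Map("limit" -> 3))
//
//   spark.stop()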
REPL, notebooks), use the builder * to get an existing session: * * {{{ * SparkSession.builder().getOrCreate() * }}} * * The builder can also be used to create a new session: * * {{{ * SparkSession.builder * .master("local") * .appName("Word Count") * .config("spark.some.config.option", "some-value") * .getOrCreate() * }}} * * @param sparkContext The Spark context associated with this Spark session. * @param existingSharedState If supplied, use the existing shared state * instead of creating a new one. * @param parentSessionState If supplied, inherit all session state (i.e. temporary * views, SQL config, UDFs etc) from parent. */@Stableclass SparkSession private( @transient val sparkContext: SparkContext, @transient private val existingSharedState: Option[SharedState], @transient private val parentSessionState: Option[SessionState], @transient private[sql] val extensions: SparkSessionExtensions, @transient private[sql] val initialSessionOptions: Map[String, String]) extends Serializable with Closeable with Logging { self => // The call site where this SparkSession was constructed. private val creationSite: CallSite = Utils.getCallSite() /** * Constructor used in Pyspark. Contains explicit application of Spark Session Extensions * which otherwise only occurs during getOrCreate. We cannot add this to the default constructor * since that would cause every new session to reinvoke Spark Session Extensions on the currently * running extensions. */ private[sql] def this( sc: SparkContext, initialSessionOptions: java.util.HashMap[String, String]) = { this(sc, None, None, SparkSession.applyExtensions(sc, new SparkSessionExtensions), initialSessionOptions.asScala.toMap) } private[sql] def this(sc: SparkContext) = this(sc, new java.util.HashMap[String, String]()) private[sql] val sessionUUID: String = UUID.randomUUID.toString sparkContext.assertNotStopped() // If there is no active SparkSession, uses the default SQL conf. Otherwise, use the session's. SQLConf.setSQLConfGetter(() => { SparkSession.getActiveSession.filterNot(_.sparkContext.isStopped).map(_.sessionState.conf) .getOrElse(SQLConf.getFallbackConf) }) /** * The version of Spark on which this application is running. * * @since 2.0.0 */ def version: String = SPARK_VERSION /* ----------------------- * | Session-related state | * ----------------------- */ /** * State shared across sessions, including the `SparkContext`, cached data, listener, * and a catalog that interacts with external systems. * * This is internal to Spark and there is no guarantee on interface stability. * * @since 2.2.0 */ @Unstable @transient lazy val sharedState: SharedState = { existingSharedState.getOrElse(new SharedState(sparkContext, initialSessionOptions)) } /** * State isolated across sessions, including SQL configurations, temporary tables, registered * functions, and everything else that accepts a [[org.apache.spark.sql.internal.SQLConf]]. * If `parentSessionState` is not null, the `SessionState` will be a copy of the parent. * * This is internal to Spark and there is no guarantee on interface stability. * * @since 2.2.0 */ @Unstable @transient lazy val sessionState: SessionState = { parentSessionState .map(_.clone(this)) .getOrElse { val state = SparkSession.instantiateSessionState( SparkSession.sessionStateClassName(sharedState.conf), self) state } } /** * A wrapped version of this session in the form of a [[SQLContext]], for backward compatibility. 
* * @since 2.0.0 */ @transient val sqlContext: SQLContext = new SQLContext(this) /** * Runtime configuration interface for Spark. * * This is the interface through which the user can get and set all Spark and Hadoop * configurations that are relevant to Spark SQL. When getting the value of a config, * this defaults to the value set in the underlying `SparkContext`, if any. * * @since 2.0.0 */ @transient lazy val conf: RuntimeConfig = new RuntimeConfig(sessionState.conf) /** * An interface to register custom [[org.apache.spark.sql.util.QueryExecutionListener]]s * that listen for execution metrics. * * @since 2.0.0 */ def listenerManager: ExecutionListenerManager = sessionState.listenerManager /** * :: Experimental :: * A collection of methods that are considered experimental, but can be used to hook into * the query planner for advanced functionality. * * @since 2.0.0 */ @Experimental @Unstable def experimental: ExperimentalMethods = sessionState.experimentalMethods /** * A collection of methods for registering user-defined functions (UDF). * * The following example registers a Scala closure as UDF: * {{{ * sparkSession.udf.register("myUDF", (arg1: Int, arg2: String) => arg2 + arg1) * }}} * * The following example registers a UDF in Java: * {{{ * sparkSession.udf().register("myUDF", * (Integer arg1, String arg2) -> arg2 + arg1, * DataTypes.StringType); * }}} * * @note The user-defined functions must be deterministic. Due to optimization, * duplicate invocations may be eliminated or the function may even be invoked more times than * it is present in the query. * * @since 2.0.0 */ def udf: UDFRegistration = sessionState.udfRegistration def udtf: UDTFRegistration = sessionState.udtfRegistration /** * Returns a `StreamingQueryManager` that allows managing all the * `StreamingQuery`s active on `this`. * * @since 2.0.0 */ @Unstable def streams: StreamingQueryManager = sessionState.streamingQueryManager /** * Start a new session with isolated SQL configurations, temporary tables, registered * functions are isolated, but sharing the underlying `SparkContext` and cached data. * * @note Other than the `SparkContext`, all shared state is initialized lazily. * This method will force the initialization of the shared state to ensure that parent * and child sessions are set up with the same shared state. If the underlying catalog * implementation is Hive, this will initialize the metastore, which may take some time. * * @since 2.0.0 */ def newSession(): SparkSession = { new SparkSession( sparkContext, Some(sharedState), parentSessionState = None, extensions, initialSessionOptions) } /** * Create an identical copy of this `SparkSession`, sharing the underlying `SparkContext` * and shared state. All the state of this session (i.e. SQL configurations, temporary tables, * registered functions) is copied over, and the cloned session is set up with the same shared * state as this session. The cloned session is independent of this session, that is, any * non-global change in either session is not reflected in the other. * * @note Other than the `SparkContext`, all shared state is initialized lazily. * This method will force the initialization of the shared state to ensure that parent * and child sessions are set up with the same shared state. If the underlying catalog * implementation is Hive, this will initialize the metastore, which may take some time. 
*/ private[sql] def cloneSession(): SparkSession = { val result = new SparkSession( sparkContext, Some(sharedState), Some(sessionState), extensions, Map.empty) result.sessionState // force copy of SessionState result } /* --------------------------------- * | Methods for creating DataFrames | * --------------------------------- */ /** * Returns a `DataFrame` with no rows or columns. * * @since 2.0.0 */ @transient lazy val emptyDataFrame: DataFrame = Dataset.ofRows(self, LocalRelation()) /** * Creates a new [[Dataset]] of type T containing zero elements. * * @since 2.0.0 */ def emptyDataset[T: Encoder]: Dataset[T] = { val encoder = implicitly[Encoder[T]] new Dataset(self, LocalRelation(encoder.schema), encoder) } /** * Creates a `DataFrame` from an RDD of Product (e.g. case classes, tuples). * * @since 2.0.0 */ def createDataFrame[A <: Product : TypeTag](rdd: RDD[A]): DataFrame = withActive { val encoder = Encoders.product[A] Dataset.ofRows(self, ExternalRDD(rdd, self)(encoder)) } /** * Creates a `DataFrame` from a local Seq of Product. * * @since 2.0.0 */ def createDataFrame[A <: Product : TypeTag](data: Seq[A]): DataFrame = withActive { val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType] val attributeSeq = toAttributes(schema) Dataset.ofRows(self, LocalRelation.fromProduct(attributeSeq, data)) } /** * :: DeveloperApi :: * Creates a `DataFrame` from an `RDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * Example: * {{{ * import org.apache.spark.sql._ * import org.apache.spark.sql.types._ * val sparkSession = new org.apache.spark.sql.SparkSession(sc) * * val schema = * StructType( * StructField("name", StringType, false) :: * StructField("age", IntegerType, true) :: Nil) * * val people = * sc.textFile("examples/src/main/resources/people.txt").map( * _.split(",")).map(p => Row(p(0), p(1).trim.toInt)) * val dataFrame = sparkSession.createDataFrame(people, schema) * dataFrame.printSchema * // root * // |-- name: string (nullable = false) * // |-- age: integer (nullable = true) * * dataFrame.createOrReplaceTempView("people") * sparkSession.sql("select name from people").collect.foreach(println) * }}} * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rowRDD: RDD[Row], schema: StructType): DataFrame = withActive { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] // TODO: use MutableProjection when rowRDD is another DataFrame and the applied // schema differs from the existing schema on any field data type. val encoder = ExpressionEncoder(replaced) val toRow = encoder.createSerializer() val catalystRows = rowRDD.map(toRow) internalCreateDataFrame(catalystRows.setName(rowRDD.name), schema) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `JavaRDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rowRDD: JavaRDD[Row], schema: StructType): DataFrame = { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] createDataFrame(rowRDD.rdd, replaced) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `java.util.List` containing [[Row]]s using the given schema. 
* It is important to make sure that the structure of every [[Row]] of the provided List matches * the provided schema. Otherwise, there will be runtime exception. * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rows: java.util.List[Row], schema: StructType): DataFrame = withActive { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] Dataset.ofRows(self, LocalRelation.fromExternalRows(toAttributes(replaced), rows.asScala.toSeq)) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * * @since 2.0.0 */ def createDataFrame(rdd: RDD[_], beanClass: Class[_]): DataFrame = withActive { val attributeSeq: Seq[AttributeReference] = getSchema(beanClass) val className = beanClass.getName val rowRdd = rdd.mapPartitions { iter => // BeanInfo is not serializable so we must rediscover it remotely for each partition. SQLContext.beansToRows(iter, Utils.classForName(className), attributeSeq) } Dataset.ofRows(self, LogicalRDD(attributeSeq, rowRdd.setName(rdd.name))(self)) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * * @since 2.0.0 */ def createDataFrame(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame = { createDataFrame(rdd.rdd, beanClass) } /** * Applies a schema to a List of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * @since 1.6.0 */ def createDataFrame(data: java.util.List[_], beanClass: Class[_]): DataFrame = withActive { val attrSeq = getSchema(beanClass) val rows = SQLContext.beansToRows(data.asScala.iterator, beanClass, attrSeq) Dataset.ofRows(self, LocalRelation(attrSeq, rows.toSeq)) } /** * Convert a `BaseRelation` created for external data sources into a `DataFrame`. * * @since 2.0.0 */ def baseRelationToDataFrame(baseRelation: BaseRelation): DataFrame = { Dataset.ofRows(self, LogicalRelation(baseRelation)) } /* ------------------------------- * | Methods for creating DataSets | * ------------------------------- */ /** * Creates a [[Dataset]] from a local Seq of data of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * == Example == * * {{{ * * import spark.implicits._ * case class Person(name: String, age: Long) * val data = Seq(Person("Michael", 29), Person("Andy", 30), Person("Justin", 19)) * val ds = spark.createDataset(data) * * ds.show() * // +-------+---+ * // | name|age| * // +-------+---+ * // |Michael| 29| * // | Andy| 30| * // | Justin| 19| * // +-------+---+ * }}} * * @since 2.0.0 */ def createDataset[T : Encoder](data: Seq[T]): Dataset[T] = { val enc = encoderFor[T] val toRow = enc.createSerializer() val attributes = toAttributes(enc.schema) val encoded = data.map(d => toRow(d).copy()) val plan = new LocalRelation(attributes, encoded) Dataset[T](self, plan) } /** * Creates a [[Dataset]] from an RDD of a given type. 
This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * @since 2.0.0 */ def createDataset[T : Encoder](data: RDD[T]): Dataset[T] = { Dataset[T](self, ExternalRDD(data, self)) } /** * Creates a [[Dataset]] from a `java.util.List` of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * == Java Example == * * {{{ * List<String> data = Arrays.asList("hello", "world"); * Dataset<String> ds = spark.createDataset(data, Encoders.STRING()); * }}} * * @since 2.0.0 */ def createDataset[T : Encoder](data: java.util.List[T]): Dataset[T] = { createDataset(data.asScala.toSeq) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from 0 to `end` (exclusive) with step value 1. * * @since 2.0.0 */ def range(end: Long): Dataset[java.lang.Long] = range(0, end) /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with step value 1. * * @since 2.0.0 */ def range(start: Long, end: Long): Dataset[java.lang.Long] = { range(start, end, step = 1, numPartitions = leafNodeDefaultParallelism) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with a step value. * * @since 2.0.0 */ def range(start: Long, end: Long, step: Long): Dataset[java.lang.Long] = { range(start, end, step, numPartitions = leafNodeDefaultParallelism) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with a step value, with partition number * specified. * * @since 2.0.0 */ def range(start: Long, end: Long, step: Long, numPartitions: Int): Dataset[java.lang.Long] = { new Dataset(self, Range(start, end, step, numPartitions), Encoders.LONG) } /** * Creates a `DataFrame` from an `RDD[InternalRow]`. */ private[sql] def internalCreateDataFrame( catalystRows: RDD[InternalRow], schema: StructType, isStreaming: Boolean = false): DataFrame = { // TODO: use MutableProjection when rowRDD is another DataFrame and the applied // schema differs from the existing schema on any field data type. val logicalPlan = LogicalRDD( toAttributes(schema), catalystRows, isStreaming = isStreaming)(self) Dataset.ofRows(self, logicalPlan) } /* ------------------------- * | Catalog-related methods | * ------------------------- */ /** * Interface through which the user may create, drop, alter or query underlying * databases, tables, functions etc. * * @since 2.0.0 */ @transient lazy val catalog: Catalog = new CatalogImpl(self) /** * Returns the specified table/view as a `DataFrame`. If it's a table, it must support batch * reading and the returned DataFrame is the batch scan query plan of this table. If it's a view, * the returned DataFrame is simply the query plan of the view, which can either be a batch or * streaming query plan. * * @param tableName is either a qualified or unqualified name that designates a table or view. * If a database is specified, it identifies the table/view from the database. 
* Otherwise, it first attempts to find a temporary view with the given name * and then match the table/view from the current database. * Note that, the global temporary view database is also valid here. * @since 2.0.0 */ def table(tableName: String): DataFrame = { read.table(tableName) } private[sql] def table(tableIdent: TableIdentifier): DataFrame = { Dataset.ofRows(self, UnresolvedRelation(tableIdent)) } /* ----------------- * | Everything else | * ----------------- */ /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. * @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql(sqlText: String, args: Array[_], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { PosParameterizedQuery(parsedPlan, args.map(lit(_).expr)) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. * @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.5.0 */ @Experimental def sql(sqlText: String, args: Array[_]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. 
* @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql( sqlText: String, args: Map[String, Any], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { NameParameterizedQuery(parsedPlan, args.mapValues(lit(_).expr).toMap) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: Map[String, Any]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: java.util.Map[String, Any]): DataFrame = { sql(sqlText, args.asScala.toMap) } /** * Executes a SQL query using Spark, returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @since 2.0.0 */ def sql(sqlText: String): DataFrame = sql(sqlText, Map.empty[String, Any]) /** * Execute an arbitrary string command inside an external execution engine rather than Spark. * This could be useful when user wants to execute some commands out of Spark. For * example, executing custom DDL/DML command for JDBC, creating index for ElasticSearch, * creating cores for Solr and so on. * * The command will be eagerly executed after this method is called and the returned * DataFrame will contain the output of the command(if any). * * @param runner The class name of the runner that implements `ExternalCommandRunner`. * @param command The target command to be executed * @param options The options for the runner. 
* * @since 3.0.0 */ @Unstable def executeCommand(runner: String, command: String, options: Map[String, String]): DataFrame = { DataSource.lookupDataSource(runner, sessionState.conf) match { case source if classOf[ExternalCommandRunner].isAssignableFrom(source) => Dataset.ofRows(self, ExternalCommandExecutor( source.newInstance().asInstanceOf[ExternalCommandRunner], command, options)) case _ => throw QueryCompilationErrors.commandExecutionInRunnerUnsupportedError(runner) } } /** * Returns a [[DataFrameReader]] that can be used to read non-streaming data in as a * `DataFrame`. * {{{ * sparkSession.read.parquet("/path/to/file.parquet") * sparkSession.read.schema(schema).json("/path/to/file.json") * }}} * * @since 2.0.0 */ def read: DataFrameReader = new DataFrameReader(self) /** * Returns a `DataStreamReader` that can be used to read streaming data in as a `DataFrame`. * {{{ * sparkSession.readStream.parquet("/path/to/directory/of/parquet/files") * sparkSession.readStream.schema(schema).json("/path/to/directory/of/json/files") * }}} * * @since 2.0.0 */ def readStream: DataStreamReader = new DataStreamReader(self) /** * Executes some code block and prints to stdout the time taken to execute the block. This is * available in Scala only and is used primarily for interactive testing and debugging. * * @since 2.1.0 */ def time[T](f: => T): T = { val start = System.nanoTime() val ret = f val end = System.nanoTime() // scalastyle:off println println(s"Time taken: ${NANOSECONDS.toMillis(end - start)} ms") // scalastyle:on println ret } // scalastyle:off // Disable style checker so "implicits" object can start with lowercase i /** * (Scala-specific) Implicit methods available in Scala for converting * common Scala objects into `DataFrame`s. * * {{{ * val sparkSession = SparkSession.builder.getOrCreate() * import sparkSession.implicits._ * }}} * * @since 2.0.0 */ object implicits extends SQLImplicits with Serializable { protected override def _sqlContext: SQLContext = SparkSession.this.sqlContext } // scalastyle:on /** * Stop the underlying `SparkContext`. * * @since 2.0.0 */ def stop(): Unit = { sparkContext.stop() } /** * Synonym for `stop()`. * * @since 2.1.0 */ override def close(): Unit = stop() /** * Parses the data type in our internal string representation. The data type string should * have the same format as the one generated by `toString` in scala. * It is only used by PySpark. */ protected[sql] def parseDataType(dataTypeString: String): DataType = { DataType.fromJson(dataTypeString) } /** * Apply a schema defined by the schemaString to an RDD. It is only used by PySpark. */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schemaString: String): DataFrame = { val schema = DataType.fromJson(schemaString).asInstanceOf[StructType] applySchemaToPythonRDD(rdd, schema) } /** * Apply `schema` to an RDD. * * @note Used by PySpark only */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schema: StructType): DataFrame = { val rowRdd = rdd.mapPartitions { iter => val fromJava = python.EvaluatePython.makeFromJava(schema) iter.map(r => fromJava(r).asInstanceOf[InternalRow]) } internalCreateDataFrame(rowRdd, schema) } /** * Returns a Catalyst Schema for the given java bean class. 
*/ private def getSchema(beanClass: Class[_]): Seq[AttributeReference] = { val (dataType, _) = JavaTypeInference.inferDataType(beanClass) dataType.asInstanceOf[StructType].fields.map { f => AttributeReference(f.name, f.dataType, f.nullable)() } } /** * Execute a block of code with the this session set as the active session, and restore the * previous session on completion. */ private[sql] def withActive[T](block: => T): T = { // Use the active session thread local directly to make sure we get the session that is actually // set and not the default session. This to prevent that we promote the default session to the // active session once we are done. val old = SparkSession.activeThreadSession.get() SparkSession.setActiveSession(this) try block finally { SparkSession.setActiveSession(old) } } private[sql] def leafNodeDefaultParallelism: Int = { conf.get(SQLConf.LEAF_NODE_DEFAULT_PARALLELISM).getOrElse(sparkContext.defaultParallelism) }}@Stableobject SparkSession extends Logging { /** * Builder for [[SparkSession]]. */ @Stable class Builder extends Logging { private[this] val options = new scala.collection.mutable.HashMap[String, String] private[this] val extensions = new SparkSessionExtensions private[this] var userSuppliedContext: Option[SparkContext] = None private[spark] def sparkContext(sparkContext: SparkContext): Builder = synchronized { userSuppliedContext = Option(sparkContext) this } /** * Sets a name for the application, which will be shown in the Spark web UI. * If no application name is set, a randomly generated name will be used. * * @since 2.0.0 */ def appName(name: String): Builder = config("spark.app.name", name) /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: String): Builder = synchronized { options += key -> value this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Long): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Double): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Boolean): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: Map[String, Any]): Builder = synchronized { map.foreach { kv: (String, Any) => { options += kv._1 -> kv._2.toString } } this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: java.util.Map[String, Any]): Builder = synchronized { config(map.asScala.toMap) } /** * Sets a list of config options based on the given `SparkConf`. 
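 *
 * For example (a sketch; the configuration values are illustrative):
 * {{{
 *   val sparkConf = new SparkConf()
 *     .setAppName("MyApp")
 *     .set("spark.sql.shuffle.partitions", "200")
 *   val spark = SparkSession.builder().config(sparkConf).getOrCreate()
 * }}}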
* * @since 2.0.0 */ def config(conf: SparkConf): Builder = synchronized { conf.getAll.foreach { case (k, v) => options += k -> v } this } /** * Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]" to * run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster. * * @since 2.0.0 */ def master(master: String): Builder = config("spark.master", master) /** * Enables Hive support, including connectivity to a persistent Hive metastore, support for * Hive serdes, and Hive user-defined functions. * * @since 2.0.0 */ def enableHiveSupport(): Builder = synchronized { if (hiveClassesArePresent) { config(CATALOG_IMPLEMENTATION.key, "hive") } else { throw new IllegalArgumentException( "Unable to instantiate SparkSession with Hive support because " + "Hive classes are not found.") } } /** * Inject extensions into the [[SparkSession]]. This allows a user to add Analyzer rules, * Optimizer rules, Planning Strategies or a customized parser. * * @since 2.2.0 */ def withExtensions(f: SparkSessionExtensions => Unit): Builder = synchronized { f(extensions) this } /** * Gets an existing [[SparkSession]] or, if there is no existing one, creates a new * one based on the options set in this builder. * * This method first checks whether there is a valid thread-local SparkSession, * and if yes, return that one. It then checks whether there is a valid global * default SparkSession, and if yes, return that one. If no valid global default * SparkSession exists, the method creates a new SparkSession and assigns the * newly created SparkSession as the global default. * * In case an existing SparkSession is returned, the non-static config options specified in * this builder will be applied to the existing SparkSession. * * @since 2.0.0 */ def getOrCreate(): SparkSession = synchronized { val sparkConf = new SparkConf() options.foreach { case (k, v) => sparkConf.set(k, v) } if (!sparkConf.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { assertOnDriver() } // Get the session from current thread's active session. var session = activeThreadSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // Global synchronization so we will only set the default session once. SparkSession.synchronized { // If the current thread does not have an active session, get it from the global session. session = defaultSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // No active nor global default session. Create a new one. val sparkContext = userSuppliedContext.getOrElse { // set a random app name if not given. if (!sparkConf.contains("spark.app.name")) { sparkConf.setAppName(java.util.UUID.randomUUID().toString) } SparkContext.getOrCreate(sparkConf) // Do not update `SparkConf` for existing `SparkContext`, as it's shared by all sessions. } loadExtensions(extensions) applyExtensions(sparkContext, extensions) session = new SparkSession(sparkContext, None, None, extensions, options.toMap) setDefaultSession(session) setActiveSession(session) registerContextListener(sparkContext) } return session } } /** * Creates a [[SparkSession.Builder]] for constructing a [[SparkSession]]. 
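 *
 * For example, combined with the map-based `config` overload (the option values are
 * illustrative only):
 * {{{
 *   val spark = SparkSession.builder()
 *     .config(Map("spark.sql.ansi.enabled" -> true, "spark.sql.shuffle.partitions" -> 64))
 *     .getOrCreate()
 * }}}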
* * @since 2.0.0 */ def builder(): Builder = new Builder /** * Changes the SparkSession that will be returned in this thread and its children when * SparkSession.getOrCreate() is called. This can be used to ensure that a given thread receives * a SparkSession with an isolated session, instead of the global (first created) context. * * @since 2.0.0 */ def setActiveSession(session: SparkSession): Unit = { activeThreadSession.set(session) } /** * Clears the active SparkSession for current thread. Subsequent calls to getOrCreate will * return the first created context instead of a thread-local override. * * @since 2.0.0 */ def clearActiveSession(): Unit = { activeThreadSession.remove() } /** * Sets the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def setDefaultSession(session: SparkSession): Unit = { defaultSession.set(session) } /** * Clears the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def clearDefaultSession(): Unit = { defaultSession.set(null) } /** * Returns the active SparkSession for the current thread, returned by the builder. * * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getActiveSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(activeThreadSession.get) } } /** * Returns the default SparkSession that is returned by the builder. * * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getDefaultSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(defaultSession.get) } } /** * Returns the currently active SparkSession, otherwise the default one. If there is no default * SparkSession, throws an exception. * * @since 2.4.0 */ def active: SparkSession = { getActiveSession.getOrElse(getDefaultSession.getOrElse( throw new IllegalStateException("No active or default Spark session found"))) } /** * Apply modifiable settings to an existing [[SparkSession]]. This method are used * both in Scala and Python, so put this under [[SparkSession]] object. */ private[sql] def applyModifiableSettings( session: SparkSession, options: java.util.HashMap[String, String]): Unit = { // Lazy val to avoid an unnecessary session state initialization lazy val conf = session.sessionState.conf val dedupOptions = if (options.isEmpty) Map.empty[String, String] else ( options.asScala.toSet -- conf.getAllConfs.toSet).toMap val (staticConfs, otherConfs) = dedupOptions.partition(kv => SQLConf.isStaticConfigKey(kv._1)) otherConfs.foreach { case (k, v) => conf.setConfString(k, v) } // Note that other runtime SQL options, for example, for other third-party datasource // can be marked as an ignored configuration here. val maybeIgnoredConfs = otherConfs.filterNot { case (k, _) => conf.isModifiable(k) } if (staticConfs.nonEmpty || maybeIgnoredConfs.nonEmpty) { logWarning( "Using an existing Spark session; only runtime SQL configurations will take effect.") } if (staticConfs.nonEmpty) { logDebug("Ignored static SQL configurations:\n " + conf.redactOptions(staticConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } if (maybeIgnoredConfs.nonEmpty) { // Only print out non-static and non-runtime SQL configurations. // Note that this might show core configurations or source specific // options defined in the third-party datasource. 
logDebug("Configurations that might not take effect:\n " + conf.redactOptions( maybeIgnoredConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } } /** * Returns a cloned SparkSession with all specified configurations disabled, or * the original SparkSession if all configurations are already disabled. */ private[sql] def getOrCloneSessionWithConfigsOff( session: SparkSession, configurations: Seq[ConfigEntry[Boolean]]): SparkSession = { val configsEnabled = configurations.filter(session.conf.get[Boolean]) if (configsEnabled.isEmpty) { session } else { val newSession = session.cloneSession() configsEnabled.foreach(conf => { newSession.conf.set(conf, false) }) newSession } } //////////////////////////////////////////////////////////////////////////////////////// // Private methods from now on //////////////////////////////////////////////////////////////////////////////////////// private val listenerRegistered: AtomicBoolean = new AtomicBoolean(false) /** Register the AppEnd listener onto the Context */ private def registerContextListener(sparkContext: SparkContext): Unit = { if (!listenerRegistered.get()) { sparkContext.addSparkListener(new SparkListener { override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = { defaultSession.set(null) listenerRegistered.set(false) } }) listenerRegistered.set(true) } } /** The active SparkSession for the current thread. */ private val activeThreadSession = new InheritableThreadLocal[SparkSession] /** Reference to the root SparkSession. */ private val defaultSession = new AtomicReference[SparkSession] private val HIVE_SESSION_STATE_BUILDER_CLASS_NAME = "org.apache.spark.sql.hive.HiveSessionStateBuilder" private def sessionStateClassName(conf: SparkConf): String = { conf.get(CATALOG_IMPLEMENTATION) match { case "hive" => HIVE_SESSION_STATE_BUILDER_CLASS_NAME case "in-memory" => classOf[SessionStateBuilder].getCanonicalName } } private def assertOnDriver(): Unit = { if (TaskContext.get != null) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkSession should only be created and accessed on the driver.") } } /** * Helper method to create an instance of `SessionState` based on `className` from conf. * The result is either `SessionState` or a Hive based `SessionState`. */ private def instantiateSessionState( className: String, sparkSession: SparkSession): SessionState = { try { // invoke new [Hive]SessionStateBuilder( // SparkSession, // Option[SessionState]) val clazz = Utils.classForName(className) val ctor = clazz.getConstructors.head ctor.newInstance(sparkSession, None).asInstanceOf[BaseSessionStateBuilder].build() } catch { case NonFatal(e) => throw new IllegalArgumentException(s"Error while instantiating '$className':", e) } } /** * @return true if Hive classes can be loaded, otherwise false. */ private[spark] def hiveClassesArePresent: Boolean = { try { Utils.classForName(HIVE_SESSION_STATE_BUILDER_CLASS_NAME) Utils.classForName("org.apache.hadoop.hive.conf.HiveConf") true } catch { case _: ClassNotFoundException | _: NoClassDefFoundError => false } } private[spark] def cleanupAnyExistingSession(): Unit = { val session = getActiveSession.orElse(getDefaultSession) if (session.isDefined) { logWarning( s"""An existing Spark session exists as the active or default session. |This probably means another suite leaked it. Attempting to stop it before continuing. 
|This existing Spark session was created at: | |${session.get.creationSite.longForm} | """.stripMargin) session.get.stop() SparkSession.clearActiveSession() SparkSession.clearDefaultSession() } } /** * Initialize extensions specified in [[StaticSQLConf]]. The classes will be applied to the * extensions passed into this function. */ private def applyExtensions( sparkContext: SparkContext, extensions: SparkSessionExtensions): SparkSessionExtensions = { val extensionConfClassNames = sparkContext.getConf.get(StaticSQLConf.SPARK_SESSION_EXTENSIONS) .getOrElse(Seq.empty) extensionConfClassNames.foreach { extensionConfClassName => try { val extensionConfClass = Utils.classForName(extensionConfClassName) val extensionConf = extensionConfClass.getConstructor().newInstance() .asInstanceOf[SparkSessionExtensions => Unit] extensionConf(extensions) } catch { // Ignore the error if we cannot find the class or when the class has the wrong type. case e@(_: ClassCastException | _: ClassNotFoundException | _: NoClassDefFoundError) => logWarning(s"Cannot use $extensionConfClassName to configure session extensions.", e) } } extensions } /** * Load extensions from [[ServiceLoader]] and use them */ private def loadExtensions(extensions: SparkSessionExtensions): Unit = { val loader = ServiceLoader.load(classOf[SparkSessionExtensionsProvider], Utils.getContextOrSparkClassLoader) val loadedExts = loader.iterator() while (loadedExts.hasNext) { try { val ext = loadedExts.next() ext(extensions) } catch { case e: Throwable => logWarning("Failed to load session extension", e) } } }} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.spark.sqlimport java.io.Closeableimport java.util.{ServiceLoader, UUID}import java.util.concurrent.TimeUnit._import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}import scala.collection.JavaConverters._import scala.reflect.runtime.universe.TypeTagimport scala.util.control.NonFatalimport org.apache.spark.{SPARK_VERSION, SparkConf, SparkContext, TaskContext}import org.apache.spark.annotation.{DeveloperApi, Experimental, Stable, Unstable}import org.apache.spark.api.java.JavaRDDimport org.apache.spark.internal.Loggingimport org.apache.spark.internal.config.{ConfigEntry, EXECUTOR_ALLOW_SPARK_CONTEXT}import org.apache.spark.rdd.RDDimport org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}import org.apache.spark.sql.catalog.Catalogimport org.apache.spark.sql.catalyst._import org.apache.spark.sql.catalyst.analysis.{NameParameterizedQuery, PosParameterizedQuery, UnresolvedRelation}import org.apache.spark.sql.catalyst.encoders._import org.apache.spark.sql.catalyst.expressions.AttributeReferenceimport org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, Range}import org.apache.spark.sql.catalyst.types.DataTypeUtils.toAttributesimport org.apache.spark.sql.catalyst.util.CharVarcharUtilsimport org.apache.spark.sql.connector.ExternalCommandRunnerimport org.apache.spark.sql.errors.QueryCompilationErrorsimport org.apache.spark.sql.execution._import org.apache.spark.sql.execution.command.ExternalCommandExecutorimport org.apache.spark.sql.execution.datasources.{DataSource, LogicalRelation}import org.apache.spark.sql.functions.litimport org.apache.spark.sql.internal._import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATIONimport org.apache.spark.sql.sources.BaseRelationimport org.apache.spark.sql.streaming._import org.apache.spark.sql.types.{DataType, StructType}import org.apache.spark.sql.util.ExecutionListenerManagerimport org.apache.spark.util.{CallSite, Utils}/** * The entry point to programming Spark with the Dataset and DataFrame API. * * In environments that this has been created upfront (e.g. REPL, notebooks), use the builder * to get an existing session: * * {{{ * SparkSession.builder().getOrCreate() * }}} * * The builder can also be used to create a new session: * * {{{ * SparkSession.builder * .master("local") * .appName("Word Count") * .config("spark.some.config.option", "some-value") * .getOrCreate() * }}} * * @param sparkContext The Spark context associated with this Spark session. * @param existingSharedState If supplied, use the existing shared state * instead of creating a new one. * @param parentSessionState If supplied, inherit all session state (i.e. temporary * views, SQL config, UDFs etc) from parent. */@Stableclass SparkSession private( @transient val sparkContext: SparkContext, @transient private val existingSharedState: Option[SharedState], @transient private val parentSessionState: Option[SessionState], @transient private[sql] val extensions: SparkSessionExtensions, @transient private[sql] val initialSessionOptions: Map[String, String]) extends Serializable with Closeable with Logging { self => // The call site where this SparkSession was constructed. private val creationSite: CallSite = Utils.getCallSite() /** * Constructor used in Pyspark. Contains explicit application of Spark Session Extensions * which otherwise only occurs during getOrCreate. 
We cannot add this to the default constructor * since that would cause every new session to reinvoke Spark Session Extensions on the currently * running extensions. */ private[sql] def this( sc: SparkContext, initialSessionOptions: java.util.HashMap[String, String]) = { this(sc, None, None, SparkSession.applyExtensions(sc, new SparkSessionExtensions), initialSessionOptions.asScala.toMap) } private[sql] def this(sc: SparkContext) = this(sc, new java.util.HashMap[String, String]()) private[sql] val sessionUUID: String = UUID.randomUUID.toString sparkContext.assertNotStopped() // If there is no active SparkSession, uses the default SQL conf. Otherwise, use the session's. SQLConf.setSQLConfGetter(() => { SparkSession.getActiveSession.filterNot(_.sparkContext.isStopped).map(_.sessionState.conf) .getOrElse(SQLConf.getFallbackConf) }) /** * The version of Spark on which this application is running. * * @since 2.0.0 */ def version: String = SPARK_VERSION /* ----------------------- * | Session-related state | * ----------------------- */ /** * State shared across sessions, including the `SparkContext`, cached data, listener, * and a catalog that interacts with external systems. * * This is internal to Spark and there is no guarantee on interface stability. * * @since 2.2.0 */ @Unstable @transient lazy val sharedState: SharedState = { existingSharedState.getOrElse(new SharedState(sparkContext, initialSessionOptions)) } /** * State isolated across sessions, including SQL configurations, temporary tables, registered * functions, and everything else that accepts a [[org.apache.spark.sql.internal.SQLConf]]. * If `parentSessionState` is not null, the `SessionState` will be a copy of the parent. * * This is internal to Spark and there is no guarantee on interface stability. * * @since 2.2.0 */ @Unstable @transient lazy val sessionState: SessionState = { parentSessionState .map(_.clone(this)) .getOrElse { val state = SparkSession.instantiateSessionState( SparkSession.sessionStateClassName(sharedState.conf), self) state } } /** * A wrapped version of this session in the form of a [[SQLContext]], for backward compatibility. * * @since 2.0.0 */ @transient val sqlContext: SQLContext = new SQLContext(this) /** * Runtime configuration interface for Spark. * * This is the interface through which the user can get and set all Spark and Hadoop * configurations that are relevant to Spark SQL. When getting the value of a config, * this defaults to the value set in the underlying `SparkContext`, if any. * * @since 2.0.0 */ @transient lazy val conf: RuntimeConfig = new RuntimeConfig(sessionState.conf) /** * An interface to register custom [[org.apache.spark.sql.util.QueryExecutionListener]]s * that listen for execution metrics. * * @since 2.0.0 */ def listenerManager: ExecutionListenerManager = sessionState.listenerManager /** * :: Experimental :: * A collection of methods that are considered experimental, but can be used to hook into * the query planner for advanced functionality. * * @since 2.0.0 */ @Experimental @Unstable def experimental: ExperimentalMethods = sessionState.experimentalMethods /** * A collection of methods for registering user-defined functions (UDF). 
* * The following example registers a Scala closure as UDF: * {{{ * sparkSession.udf.register("myUDF", (arg1: Int, arg2: String) => arg2 + arg1) * }}} * * The following example registers a UDF in Java: * {{{ * sparkSession.udf().register("myUDF", * (Integer arg1, String arg2) -> arg2 + arg1, * DataTypes.StringType); * }}} * * @note The user-defined functions must be deterministic. Due to optimization, * duplicate invocations may be eliminated or the function may even be invoked more times than * it is present in the query. * * @since 2.0.0 */ def udf: UDFRegistration = sessionState.udfRegistration def udtf: UDTFRegistration = sessionState.udtfRegistration /** * Returns a `StreamingQueryManager` that allows managing all the * `StreamingQuery`s active on `this`. * * @since 2.0.0 */ @Unstable def streams: StreamingQueryManager = sessionState.streamingQueryManager /** * Start a new session with isolated SQL configurations, temporary tables, registered * functions are isolated, but sharing the underlying `SparkContext` and cached data. * * @note Other than the `SparkContext`, all shared state is initialized lazily. * This method will force the initialization of the shared state to ensure that parent * and child sessions are set up with the same shared state. If the underlying catalog * implementation is Hive, this will initialize the metastore, which may take some time. * * @since 2.0.0 */ def newSession(): SparkSession = { new SparkSession( sparkContext, Some(sharedState), parentSessionState = None, extensions, initialSessionOptions) } /** * Create an identical copy of this `SparkSession`, sharing the underlying `SparkContext` * and shared state. All the state of this session (i.e. SQL configurations, temporary tables, * registered functions) is copied over, and the cloned session is set up with the same shared * state as this session. The cloned session is independent of this session, that is, any * non-global change in either session is not reflected in the other. * * @note Other than the `SparkContext`, all shared state is initialized lazily. * This method will force the initialization of the shared state to ensure that parent * and child sessions are set up with the same shared state. If the underlying catalog * implementation is Hive, this will initialize the metastore, which may take some time. */ private[sql] def cloneSession(): SparkSession = { val result = new SparkSession( sparkContext, Some(sharedState), Some(sessionState), extensions, Map.empty) result.sessionState // force copy of SessionState result } /* --------------------------------- * | Methods for creating DataFrames | * --------------------------------- */ /** * Returns a `DataFrame` with no rows or columns. * * @since 2.0.0 */ @transient lazy val emptyDataFrame: DataFrame = Dataset.ofRows(self, LocalRelation()) /** * Creates a new [[Dataset]] of type T containing zero elements. * * @since 2.0.0 */ def emptyDataset[T: Encoder]: Dataset[T] = { val encoder = implicitly[Encoder[T]] new Dataset(self, LocalRelation(encoder.schema), encoder) } /** * Creates a `DataFrame` from an RDD of Product (e.g. case classes, tuples). * * @since 2.0.0 */ def createDataFrame[A <: Product : TypeTag](rdd: RDD[A]): DataFrame = withActive { val encoder = Encoders.product[A] Dataset.ofRows(self, ExternalRDD(rdd, self)(encoder)) } /** * Creates a `DataFrame` from a local Seq of Product. 
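 *
 * For example (a sketch; the case class and data are illustrative, and `spark` is an
 * existing `SparkSession`):
 * {{{
 *   case class Person(name: String, age: Long)
 *   val df = spark.createDataFrame(Seq(Person("Andy", 32), Person("Justin", 19)))
 *   df.show()
 * }}}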
* * @since 2.0.0 */ def createDataFrame[A <: Product : TypeTag](data: Seq[A]): DataFrame = withActive { val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType] val attributeSeq = toAttributes(schema) Dataset.ofRows(self, LocalRelation.fromProduct(attributeSeq, data)) } /** * :: DeveloperApi :: * Creates a `DataFrame` from an `RDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * Example: * {{{ * import org.apache.spark.sql._ * import org.apache.spark.sql.types._ * val sparkSession = new org.apache.spark.sql.SparkSession(sc) * * val schema = * StructType( * StructField("name", StringType, false) :: * StructField("age", IntegerType, true) :: Nil) * * val people = * sc.textFile("examples/src/main/resources/people.txt").map( * _.split(",")).map(p => Row(p(0), p(1).trim.toInt)) * val dataFrame = sparkSession.createDataFrame(people, schema) * dataFrame.printSchema * // root * // |-- name: string (nullable = false) * // |-- age: integer (nullable = true) * * dataFrame.createOrReplaceTempView("people") * sparkSession.sql("select name from people").collect.foreach(println) * }}} * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rowRDD: RDD[Row], schema: StructType): DataFrame = withActive { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] // TODO: use MutableProjection when rowRDD is another DataFrame and the applied // schema differs from the existing schema on any field data type. val encoder = ExpressionEncoder(replaced) val toRow = encoder.createSerializer() val catalystRows = rowRDD.map(toRow) internalCreateDataFrame(catalystRows.setName(rowRDD.name), schema) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `JavaRDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rowRDD: JavaRDD[Row], schema: StructType): DataFrame = { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] createDataFrame(rowRDD.rdd, replaced) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `java.util.List` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided List matches * the provided schema. Otherwise, there will be runtime exception. * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rows: java.util.List[Row], schema: StructType): DataFrame = withActive { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] Dataset.ofRows(self, LocalRelation.fromExternalRows(toAttributes(replaced), rows.asScala.toSeq)) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * * @since 2.0.0 */ def createDataFrame(rdd: RDD[_], beanClass: Class[_]): DataFrame = withActive { val attributeSeq: Seq[AttributeReference] = getSchema(beanClass) val className = beanClass.getName val rowRdd = rdd.mapPartitions { iter => // BeanInfo is not serializable so we must rediscover it remotely for each partition. 
SQLContext.beansToRows(iter, Utils.classForName(className), attributeSeq) } Dataset.ofRows(self, LogicalRDD(attributeSeq, rowRdd.setName(rdd.name))(self)) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * * @since 2.0.0 */ def createDataFrame(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame = { createDataFrame(rdd.rdd, beanClass) } /** * Applies a schema to a List of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * @since 1.6.0 */ def createDataFrame(data: java.util.List[_], beanClass: Class[_]): DataFrame = withActive { val attrSeq = getSchema(beanClass) val rows = SQLContext.beansToRows(data.asScala.iterator, beanClass, attrSeq) Dataset.ofRows(self, LocalRelation(attrSeq, rows.toSeq)) } /** * Convert a `BaseRelation` created for external data sources into a `DataFrame`. * * @since 2.0.0 */ def baseRelationToDataFrame(baseRelation: BaseRelation): DataFrame = { Dataset.ofRows(self, LogicalRelation(baseRelation)) } /* ------------------------------- * | Methods for creating DataSets | * ------------------------------- */ /** * Creates a [[Dataset]] from a local Seq of data of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * == Example == * * {{{ * * import spark.implicits._ * case class Person(name: String, age: Long) * val data = Seq(Person("Michael", 29), Person("Andy", 30), Person("Justin", 19)) * val ds = spark.createDataset(data) * * ds.show() * // +-------+---+ * // | name|age| * // +-------+---+ * // |Michael| 29| * // | Andy| 30| * // | Justin| 19| * // +-------+---+ * }}} * * @since 2.0.0 */ def createDataset[T : Encoder](data: Seq[T]): Dataset[T] = { val enc = encoderFor[T] val toRow = enc.createSerializer() val attributes = toAttributes(enc.schema) val encoded = data.map(d => toRow(d).copy()) val plan = new LocalRelation(attributes, encoded) Dataset[T](self, plan) } /** * Creates a [[Dataset]] from an RDD of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * @since 2.0.0 */ def createDataset[T : Encoder](data: RDD[T]): Dataset[T] = { Dataset[T](self, ExternalRDD(data, self)) } /** * Creates a [[Dataset]] from a `java.util.List` of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. 
* * == Java Example == * * {{{ * List<String> data = Arrays.asList("hello", "world"); * Dataset<String> ds = spark.createDataset(data, Encoders.STRING()); * }}} * * @since 2.0.0 */ def createDataset[T : Encoder](data: java.util.List[T]): Dataset[T] = { createDataset(data.asScala.toSeq) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from 0 to `end` (exclusive) with step value 1. * * @since 2.0.0 */ def range(end: Long): Dataset[java.lang.Long] = range(0, end) /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with step value 1. * * @since 2.0.0 */ def range(start: Long, end: Long): Dataset[java.lang.Long] = { range(start, end, step = 1, numPartitions = leafNodeDefaultParallelism) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with a step value. * * @since 2.0.0 */ def range(start: Long, end: Long, step: Long): Dataset[java.lang.Long] = { range(start, end, step, numPartitions = leafNodeDefaultParallelism) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with a step value, with partition number * specified. * * @since 2.0.0 */ def range(start: Long, end: Long, step: Long, numPartitions: Int): Dataset[java.lang.Long] = { new Dataset(self, Range(start, end, step, numPartitions), Encoders.LONG) } /** * Creates a `DataFrame` from an `RDD[InternalRow]`. */ private[sql] def internalCreateDataFrame( catalystRows: RDD[InternalRow], schema: StructType, isStreaming: Boolean = false): DataFrame = { // TODO: use MutableProjection when rowRDD is another DataFrame and the applied // schema differs from the existing schema on any field data type. val logicalPlan = LogicalRDD( toAttributes(schema), catalystRows, isStreaming = isStreaming)(self) Dataset.ofRows(self, logicalPlan) } /* ------------------------- * | Catalog-related methods | * ------------------------- */ /** * Interface through which the user may create, drop, alter or query underlying * databases, tables, functions etc. * * @since 2.0.0 */ @transient lazy val catalog: Catalog = new CatalogImpl(self) /** * Returns the specified table/view as a `DataFrame`. If it's a table, it must support batch * reading and the returned DataFrame is the batch scan query plan of this table. If it's a view, * the returned DataFrame is simply the query plan of the view, which can either be a batch or * streaming query plan. * * @param tableName is either a qualified or unqualified name that designates a table or view. * If a database is specified, it identifies the table/view from the database. * Otherwise, it first attempts to find a temporary view with the given name * and then match the table/view from the current database. * Note that, the global temporary view database is also valid here. * @since 2.0.0 */ def table(tableName: String): DataFrame = { read.table(tableName) } private[sql] def table(tableIdent: TableIdentifier): DataFrame = { Dataset.ofRows(self, UnresolvedRelation(tableIdent)) } /* ----------------- * | Everything else | * ----------------- */ /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. 
* @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql(sqlText: String, args: Array[_], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { PosParameterizedQuery(parsedPlan, args.map(lit(_).expr)) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. * @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.5.0 */ @Experimental def sql(sqlText: String, args: Array[_]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql( sqlText: String, args: Map[String, Any], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { NameParameterizedQuery(parsedPlan, args.mapValues(lit(_).expr).toMap) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. 
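 *
 * For example (a sketch that assumes a table or view named `people` exists):
 * {{{
 *   spark.sql(
 *     "SELECT * FROM people WHERE name = :name AND birthdate > :minDate",
 *     Map("name" -> "Steven", "minDate" -> java.time.LocalDate.of(2023, 4, 2)))
 * }}}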
* * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: Map[String, Any]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: java.util.Map[String, Any]): DataFrame = { sql(sqlText, args.asScala.toMap) } /** * Executes a SQL query using Spark, returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @since 2.0.0 */ def sql(sqlText: String): DataFrame = sql(sqlText, Map.empty[String, Any]) /** * Execute an arbitrary string command inside an external execution engine rather than Spark. * This could be useful when user wants to execute some commands out of Spark. For * example, executing custom DDL/DML command for JDBC, creating index for ElasticSearch, * creating cores for Solr and so on. * * The command will be eagerly executed after this method is called and the returned * DataFrame will contain the output of the command(if any). * * @param runner The class name of the runner that implements `ExternalCommandRunner`. * @param command The target command to be executed * @param options The options for the runner. * * @since 3.0.0 */ @Unstable def executeCommand(runner: String, command: String, options: Map[String, String]): DataFrame = { DataSource.lookupDataSource(runner, sessionState.conf) match { case source if classOf[ExternalCommandRunner].isAssignableFrom(source) => Dataset.ofRows(self, ExternalCommandExecutor( source.newInstance().asInstanceOf[ExternalCommandRunner], command, options)) case _ => throw QueryCompilationErrors.commandExecutionInRunnerUnsupportedError(runner) } } /** * Returns a [[DataFrameReader]] that can be used to read non-streaming data in as a * `DataFrame`. * {{{ * sparkSession.read.parquet("/path/to/file.parquet") * sparkSession.read.schema(schema).json("/path/to/file.json") * }}} * * @since 2.0.0 */ def read: DataFrameReader = new DataFrameReader(self) /** * Returns a `DataStreamReader` that can be used to read streaming data in as a `DataFrame`. * {{{ * sparkSession.readStream.parquet("/path/to/directory/of/parquet/files") * sparkSession.readStream.schema(schema).json("/path/to/directory/of/json/files") * }}} * * @since 2.0.0 */ def readStream: DataStreamReader = new DataStreamReader(self) /** * Executes some code block and prints to stdout the time taken to execute the block. This is * available in Scala only and is used primarily for interactive testing and debugging. 
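 *
 * For example (a sketch; the timed block is arbitrary):
 * {{{
 *   val count = spark.time {
 *     spark.range(0, 1000000).count()
 *   }
 *   // prints something like: Time taken: 123 ms
 * }}}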
* * @since 2.1.0 */ def time[T](f: => T): T = { val start = System.nanoTime() val ret = f val end = System.nanoTime() // scalastyle:off println println(s"Time taken: ${NANOSECONDS.toMillis(end - start)} ms") // scalastyle:on println ret } // scalastyle:off // Disable style checker so "implicits" object can start with lowercase i /** * (Scala-specific) Implicit methods available in Scala for converting * common Scala objects into `DataFrame`s. * * {{{ * val sparkSession = SparkSession.builder.getOrCreate() * import sparkSession.implicits._ * }}} * * @since 2.0.0 */ object implicits extends SQLImplicits with Serializable { protected override def _sqlContext: SQLContext = SparkSession.this.sqlContext } // scalastyle:on /** * Stop the underlying `SparkContext`. * * @since 2.0.0 */ def stop(): Unit = { sparkContext.stop() } /** * Synonym for `stop()`. * * @since 2.1.0 */ override def close(): Unit = stop() /** * Parses the data type in our internal string representation. The data type string should * have the same format as the one generated by `toString` in scala. * It is only used by PySpark. */ protected[sql] def parseDataType(dataTypeString: String): DataType = { DataType.fromJson(dataTypeString) } /** * Apply a schema defined by the schemaString to an RDD. It is only used by PySpark. */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schemaString: String): DataFrame = { val schema = DataType.fromJson(schemaString).asInstanceOf[StructType] applySchemaToPythonRDD(rdd, schema) } /** * Apply `schema` to an RDD. * * @note Used by PySpark only */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schema: StructType): DataFrame = { val rowRdd = rdd.mapPartitions { iter => val fromJava = python.EvaluatePython.makeFromJava(schema) iter.map(r => fromJava(r).asInstanceOf[InternalRow]) } internalCreateDataFrame(rowRdd, schema) } /** * Returns a Catalyst Schema for the given java bean class. */ private def getSchema(beanClass: Class[_]): Seq[AttributeReference] = { val (dataType, _) = JavaTypeInference.inferDataType(beanClass) dataType.asInstanceOf[StructType].fields.map { f => AttributeReference(f.name, f.dataType, f.nullable)() } } /** * Execute a block of code with the this session set as the active session, and restore the * previous session on completion. */ private[sql] def withActive[T](block: => T): T = { // Use the active session thread local directly to make sure we get the session that is actually // set and not the default session. This to prevent that we promote the default session to the // active session once we are done. val old = SparkSession.activeThreadSession.get() SparkSession.setActiveSession(this) try block finally { SparkSession.setActiveSession(old) } } private[sql] def leafNodeDefaultParallelism: Int = { conf.get(SQLConf.LEAF_NODE_DEFAULT_PARALLELISM).getOrElse(sparkContext.defaultParallelism) }}@Stableobject SparkSession extends Logging { /** * Builder for [[SparkSession]]. */ @Stable class Builder extends Logging { private[this] val options = new scala.collection.mutable.HashMap[String, String] private[this] val extensions = new SparkSessionExtensions private[this] var userSuppliedContext: Option[SparkContext] = None private[spark] def sparkContext(sparkContext: SparkContext): Builder = synchronized { userSuppliedContext = Option(sparkContext) this } /** * Sets a name for the application, which will be shown in the Spark web UI. * If no application name is set, a randomly generated name will be used. 
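 *
 * For example:
 * {{{
 *   SparkSession.builder().appName("Nightly Aggregation").getOrCreate()
 * }}}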
* * @since 2.0.0 */ def appName(name: String): Builder = config("spark.app.name", name) /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: String): Builder = synchronized { options += key -> value this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Long): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Double): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Boolean): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: Map[String, Any]): Builder = synchronized { map.foreach { kv: (String, Any) => { options += kv._1 -> kv._2.toString } } this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: java.util.Map[String, Any]): Builder = synchronized { config(map.asScala.toMap) } /** * Sets a list of config options based on the given `SparkConf`. * * @since 2.0.0 */ def config(conf: SparkConf): Builder = synchronized { conf.getAll.foreach { case (k, v) => options += k -> v } this } /** * Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]" to * run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster. * * @since 2.0.0 */ def master(master: String): Builder = config("spark.master", master) /** * Enables Hive support, including connectivity to a persistent Hive metastore, support for * Hive serdes, and Hive user-defined functions. * * @since 2.0.0 */ def enableHiveSupport(): Builder = synchronized { if (hiveClassesArePresent) { config(CATALOG_IMPLEMENTATION.key, "hive") } else { throw new IllegalArgumentException( "Unable to instantiate SparkSession with Hive support because " + "Hive classes are not found.") } } /** * Inject extensions into the [[SparkSession]]. This allows a user to add Analyzer rules, * Optimizer rules, Planning Strategies or a customized parser. * * @since 2.2.0 */ def withExtensions(f: SparkSessionExtensions => Unit): Builder = synchronized { f(extensions) this } /** * Gets an existing [[SparkSession]] or, if there is no existing one, creates a new * one based on the options set in this builder. * * This method first checks whether there is a valid thread-local SparkSession, * and if yes, return that one. It then checks whether there is a valid global * default SparkSession, and if yes, return that one. If no valid global default * SparkSession exists, the method creates a new SparkSession and assigns the * newly created SparkSession as the global default. 
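 *
 * A sketch of this lookup order (assuming no session exists yet in the JVM):
 * {{{
 *   val first = SparkSession.builder().master("local[2]").getOrCreate() // creates the session
 *   val again = SparkSession.builder().getOrCreate()                    // returns the same one
 *   assert(first eq again)
 * }}}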
* * In case an existing SparkSession is returned, the non-static config options specified in * this builder will be applied to the existing SparkSession. * * @since 2.0.0 */ def getOrCreate(): SparkSession = synchronized { val sparkConf = new SparkConf() options.foreach { case (k, v) => sparkConf.set(k, v) } if (!sparkConf.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { assertOnDriver() } // Get the session from current thread's active session. var session = activeThreadSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // Global synchronization so we will only set the default session once. SparkSession.synchronized { // If the current thread does not have an active session, get it from the global session. session = defaultSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // No active nor global default session. Create a new one. val sparkContext = userSuppliedContext.getOrElse { // set a random app name if not given. if (!sparkConf.contains("spark.app.name")) { sparkConf.setAppName(java.util.UUID.randomUUID().toString) } SparkContext.getOrCreate(sparkConf) // Do not update `SparkConf` for existing `SparkContext`, as it's shared by all sessions. } loadExtensions(extensions) applyExtensions(sparkContext, extensions) session = new SparkSession(sparkContext, None, None, extensions, options.toMap) setDefaultSession(session) setActiveSession(session) registerContextListener(sparkContext) } return session } } /** * Creates a [[SparkSession.Builder]] for constructing a [[SparkSession]]. * * @since 2.0.0 */ def builder(): Builder = new Builder /** * Changes the SparkSession that will be returned in this thread and its children when * SparkSession.getOrCreate() is called. This can be used to ensure that a given thread receives * a SparkSession with an isolated session, instead of the global (first created) context. * * @since 2.0.0 */ def setActiveSession(session: SparkSession): Unit = { activeThreadSession.set(session) } /** * Clears the active SparkSession for current thread. Subsequent calls to getOrCreate will * return the first created context instead of a thread-local override. * * @since 2.0.0 */ def clearActiveSession(): Unit = { activeThreadSession.remove() } /** * Sets the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def setDefaultSession(session: SparkSession): Unit = { defaultSession.set(session) } /** * Clears the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def clearDefaultSession(): Unit = { defaultSession.set(null) } /** * Returns the active SparkSession for the current thread, returned by the builder. * * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getActiveSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(activeThreadSession.get) } } /** * Returns the default SparkSession that is returned by the builder. * * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getDefaultSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(defaultSession.get) } } /** * Returns the currently active SparkSession, otherwise the default one. 
If there is no default * SparkSession, throws an exception. * * @since 2.4.0 */ def active: SparkSession = { getActiveSession.getOrElse(getDefaultSession.getOrElse( throw new IllegalStateException("No active or default Spark session found"))) } /** * Apply modifiable settings to an existing [[SparkSession]]. This method are used * both in Scala and Python, so put this under [[SparkSession]] object. */ private[sql] def applyModifiableSettings( session: SparkSession, options: java.util.HashMap[String, String]): Unit = { // Lazy val to avoid an unnecessary session state initialization lazy val conf = session.sessionState.conf val dedupOptions = if (options.isEmpty) Map.empty[String, String] else ( options.asScala.toSet -- conf.getAllConfs.toSet).toMap val (staticConfs, otherConfs) = dedupOptions.partition(kv => SQLConf.isStaticConfigKey(kv._1)) otherConfs.foreach { case (k, v) => conf.setConfString(k, v) } // Note that other runtime SQL options, for example, for other third-party datasource // can be marked as an ignored configuration here. val maybeIgnoredConfs = otherConfs.filterNot { case (k, _) => conf.isModifiable(k) } if (staticConfs.nonEmpty || maybeIgnoredConfs.nonEmpty) { logWarning( "Using an existing Spark session; only runtime SQL configurations will take effect.") } if (staticConfs.nonEmpty) { logDebug("Ignored static SQL configurations:\n " + conf.redactOptions(staticConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } if (maybeIgnoredConfs.nonEmpty) { // Only print out non-static and non-runtime SQL configurations. // Note that this might show core configurations or source specific // options defined in the third-party datasource. logDebug("Configurations that might not take effect:\n " + conf.redactOptions( maybeIgnoredConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } } /** * Returns a cloned SparkSession with all specified configurations disabled, or * the original SparkSession if all configurations are already disabled. */ private[sql] def getOrCloneSessionWithConfigsOff( session: SparkSession, configurations: Seq[ConfigEntry[Boolean]]): SparkSession = { val configsEnabled = configurations.filter(session.conf.get[Boolean]) if (configsEnabled.isEmpty) { session } else { val newSession = session.cloneSession() configsEnabled.foreach(conf => { newSession.conf.set(conf, false) }) newSession } } //////////////////////////////////////////////////////////////////////////////////////// // Private methods from now on //////////////////////////////////////////////////////////////////////////////////////// private val listenerRegistered: AtomicBoolean = new AtomicBoolean(false) /** Register the AppEnd listener onto the Context */ private def registerContextListener(sparkContext: SparkContext): Unit = { if (!listenerRegistered.get()) { sparkContext.addSparkListener(new SparkListener { override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = { defaultSession.set(null) listenerRegistered.set(false) } }) listenerRegistered.set(true) } } /** The active SparkSession for the current thread. */ private val activeThreadSession = new InheritableThreadLocal[SparkSession] /** Reference to the root SparkSession. 
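Used by `getOrCreate` as the fallback when the calling thread has no active session.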
*/ private val defaultSession = new AtomicReference[SparkSession] private val HIVE_SESSION_STATE_BUILDER_CLASS_NAME = "org.apache.spark.sql.hive.HiveSessionStateBuilder" private def sessionStateClassName(conf: SparkConf): String = { conf.get(CATALOG_IMPLEMENTATION) match { case "hive" => HIVE_SESSION_STATE_BUILDER_CLASS_NAME case "in-memory" => classOf[SessionStateBuilder].getCanonicalName } } private def assertOnDriver(): Unit = { if (TaskContext.get != null) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkSession should only be created and accessed on the driver.") } } /** * Helper method to create an instance of `SessionState` based on `className` from conf. * The result is either `SessionState` or a Hive based `SessionState`. */ private def instantiateSessionState( className: String, sparkSession: SparkSession): SessionState = { try { // invoke new [Hive]SessionStateBuilder( // SparkSession, // Option[SessionState]) val clazz = Utils.classForName(className) val ctor = clazz.getConstructors.head ctor.newInstance(sparkSession, None).asInstanceOf[BaseSessionStateBuilder].build() } catch { case NonFatal(e) => throw new IllegalArgumentException(s"Error while instantiating '$className':", e) } } /** * @return true if Hive classes can be loaded, otherwise false. */ private[spark] def hiveClassesArePresent: Boolean = { try { Utils.classForName(HIVE_SESSION_STATE_BUILDER_CLASS_NAME) Utils.classForName("org.apache.hadoop.hive.conf.HiveConf") true } catch { case _: ClassNotFoundException | _: NoClassDefFoundError => false } } private[spark] def cleanupAnyExistingSession(): Unit = { val session = getActiveSession.orElse(getDefaultSession) if (session.isDefined) { logWarning( s"""An existing Spark session exists as the active or default session. |This probably means another suite leaked it. Attempting to stop it before continuing. |This existing Spark session was created at: | |${session.get.creationSite.longForm} | """.stripMargin) session.get.stop() SparkSession.clearActiveSession() SparkSession.clearDefaultSession() } } /** * Initialize extensions specified in [[StaticSQLConf]]. The classes will be applied to the * extensions passed into this function. */ private def applyExtensions( sparkContext: SparkContext, extensions: SparkSessionExtensions): SparkSessionExtensions = { val extensionConfClassNames = sparkContext.getConf.get(StaticSQLConf.SPARK_SESSION_EXTENSIONS) .getOrElse(Seq.empty) extensionConfClassNames.foreach { extensionConfClassName => try { val extensionConfClass = Utils.classForName(extensionConfClassName) val extensionConf = extensionConfClass.getConstructor().newInstance() .asInstanceOf[SparkSessionExtensions => Unit] extensionConf(extensions) } catch { // Ignore the error if we cannot find the class or when the class has the wrong type. 
        case e@(_: ClassCastException | _: ClassNotFoundException | _: NoClassDefFoundError) =>
          logWarning(s"Cannot use $extensionConfClassName to configure session extensions.", e)
      }
    }
    extensions
  }

  /**
   * Load extensions from [[ServiceLoader]] and use them
   */
  private def loadExtensions(extensions: SparkSessionExtensions): Unit = {
    val loader = ServiceLoader.load(classOf[SparkSessionExtensionsProvider],
      Utils.getContextOrSparkClassLoader)
    val loadedExts = loader.iterator()

    while (loadedExts.hasNext) {
      try {
        val ext = loadedExts.next()
        ext(extensions)
      } catch {
        case e: Throwable => logWarning("Failed to load session extension", e)
      }
    }
  }
}
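
// ---------------------------------------------------------------------------
// Minimal usage sketch (added for illustration; not part of the original
// SparkSession.scala). It only exercises the public API defined above:
// SparkSession.builder(), Builder.master/appName/config/getOrCreate,
// SparkSession.range and stop(); Dataset.show() comes from Dataset, defined
// elsewhere in Spark SQL. The app name and config values are placeholders.
// ---------------------------------------------------------------------------
object SparkSessionUsageSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("spark-session-usage-sketch")
      .config("spark.sql.shuffle.partitions", "4")
      .getOrCreate()

    // A second getOrCreate() on the same thread returns the existing session;
    // only modifiable (runtime) SQL configs from the builder are re-applied.
    val again = SparkSession.builder()
      .config("spark.sql.shuffle.partitions", "8")
      .getOrCreate()
    assert(spark eq again)

    // `range` produces a single-column Dataset[java.lang.Long] named `id`.
    spark.range(0, 5).show()

    spark.stop()
  }
}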
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.spark.sqlimport java.io.Closeableimport java.util.{ServiceLoader, UUID}import java.util.concurrent.TimeUnit._import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}import scala.collection.JavaConverters._import scala.reflect.runtime.universe.TypeTagimport scala.util.control.NonFatalimport org.apache.spark.{SPARK_VERSION, SparkConf, SparkContext, TaskContext}import org.apache.spark.annotation.{DeveloperApi, Experimental, Stable, Unstable}import org.apache.spark.api.java.JavaRDDimport org.apache.spark.internal.Loggingimport org.apache.spark.internal.config.{ConfigEntry, EXECUTOR_ALLOW_SPARK_CONTEXT}import org.apache.spark.rdd.RDDimport org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}import org.apache.spark.sql.catalog.Catalogimport org.apache.spark.sql.catalyst._import org.apache.spark.sql.catalyst.analysis.{NameParameterizedQuery, PosParameterizedQuery, UnresolvedRelation}import org.apache.spark.sql.catalyst.encoders._import org.apache.spark.sql.catalyst.expressions.AttributeReferenceimport org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, Range}import org.apache.spark.sql.catalyst.types.DataTypeUtils.toAttributesimport org.apache.spark.sql.catalyst.util.CharVarcharUtilsimport org.apache.spark.sql.connector.ExternalCommandRunnerimport org.apache.spark.sql.errors.QueryCompilationErrorsimport org.apache.spark.sql.execution._import org.apache.spark.sql.execution.command.ExternalCommandExecutorimport org.apache.spark.sql.execution.datasources.{DataSource, LogicalRelation}import org.apache.spark.sql.functions.litimport org.apache.spark.sql.internal._import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATIONimport org.apache.spark.sql.sources.BaseRelationimport org.apache.spark.sql.streaming._import org.apache.spark.sql.types.{DataType, StructType}import org.apache.spark.sql.util.ExecutionListenerManagerimport org.apache.spark.util.{CallSite, Utils}/** * The entry point to programming Spark with the Dataset and DataFrame API. * * In environments that this has been created upfront (e.g. REPL, notebooks), use the builder * to get an existing session: * * {{{ * SparkSession.builder().getOrCreate() * }}} * * The builder can also be used to create a new session: * * {{{ * SparkSession.builder * .master("local") * .appName("Word Count") * .config("spark.some.config.option", "some-value") * .getOrCreate() * }}} * * @param sparkContext The Spark context associated with this Spark session. * @param existingSharedState If supplied, use the existing shared state * instead of creating a new one. * @param parentSessionState If supplied, inherit all session state (i.e. temporary * views, SQL config, UDFs etc) from parent. */@Stableclass SparkSession private( @transient val sparkContext: SparkContext, @transient private val existingSharedState: Option[SharedState], @transient private val parentSessionState: Option[SessionState], @transient private[sql] val extensions: SparkSessionExtensions, @transient private[sql] val initialSessionOptions: Map[String, String]) extends Serializable with Closeable with Logging { self => // The call site where this SparkSession was constructed. private val creationSite: CallSite = Utils.getCallSite() /** * Constructor used in Pyspark. Contains explicit application of Spark Session Extensions * which otherwise only occurs during getOrCreate. 
We cannot add this to the default constructor * since that would cause every new session to reinvoke Spark Session Extensions on the currently * running extensions. */ private[sql] def this( sc: SparkContext, initialSessionOptions: java.util.HashMap[String, String]) = { this(sc, None, None, SparkSession.applyExtensions(sc, new SparkSessionExtensions), initialSessionOptions.asScala.toMap) } private[sql] def this(sc: SparkContext) = this(sc, new java.util.HashMap[String, String]()) private[sql] val sessionUUID: String = UUID.randomUUID.toString sparkContext.assertNotStopped() // If there is no active SparkSession, uses the default SQL conf. Otherwise, use the session's. SQLConf.setSQLConfGetter(() => { SparkSession.getActiveSession.filterNot(_.sparkContext.isStopped).map(_.sessionState.conf) .getOrElse(SQLConf.getFallbackConf) }) /** * The version of Spark on which this application is running. * * @since 2.0.0 */ def version: String = SPARK_VERSION /* ----------------------- * | Session-related state | * ----------------------- */ /** * State shared across sessions, including the `SparkContext`, cached data, listener, * and a catalog that interacts with external systems. * * This is internal to Spark and there is no guarantee on interface stability. * * @since 2.2.0 */ @Unstable @transient lazy val sharedState: SharedState = { existingSharedState.getOrElse(new SharedState(sparkContext, initialSessionOptions)) } /** * State isolated across sessions, including SQL configurations, temporary tables, registered * functions, and everything else that accepts a [[org.apache.spark.sql.internal.SQLConf]]. * If `parentSessionState` is not null, the `SessionState` will be a copy of the parent. * * This is internal to Spark and there is no guarantee on interface stability. * * @since 2.2.0 */ @Unstable @transient lazy val sessionState: SessionState = { parentSessionState .map(_.clone(this)) .getOrElse { val state = SparkSession.instantiateSessionState( SparkSession.sessionStateClassName(sharedState.conf), self) state } } /** * A wrapped version of this session in the form of a [[SQLContext]], for backward compatibility. * * @since 2.0.0 */ @transient val sqlContext: SQLContext = new SQLContext(this) /** * Runtime configuration interface for Spark. * * This is the interface through which the user can get and set all Spark and Hadoop * configurations that are relevant to Spark SQL. When getting the value of a config, * this defaults to the value set in the underlying `SparkContext`, if any. * * @since 2.0.0 */ @transient lazy val conf: RuntimeConfig = new RuntimeConfig(sessionState.conf) /** * An interface to register custom [[org.apache.spark.sql.util.QueryExecutionListener]]s * that listen for execution metrics. * * @since 2.0.0 */ def listenerManager: ExecutionListenerManager = sessionState.listenerManager /** * :: Experimental :: * A collection of methods that are considered experimental, but can be used to hook into * the query planner for advanced functionality. * * @since 2.0.0 */ @Experimental @Unstable def experimental: ExperimentalMethods = sessionState.experimentalMethods /** * A collection of methods for registering user-defined functions (UDF). 
* * The following example registers a Scala closure as UDF: * {{{ * sparkSession.udf.register("myUDF", (arg1: Int, arg2: String) => arg2 + arg1) * }}} * * The following example registers a UDF in Java: * {{{ * sparkSession.udf().register("myUDF", * (Integer arg1, String arg2) -> arg2 + arg1, * DataTypes.StringType); * }}} * * @note The user-defined functions must be deterministic. Due to optimization, * duplicate invocations may be eliminated or the function may even be invoked more times than * it is present in the query. * * @since 2.0.0 */ def udf: UDFRegistration = sessionState.udfRegistration def udtf: UDTFRegistration = sessionState.udtfRegistration /** * Returns a `StreamingQueryManager` that allows managing all the * `StreamingQuery`s active on `this`. * * @since 2.0.0 */ @Unstable def streams: StreamingQueryManager = sessionState.streamingQueryManager /** * Start a new session with isolated SQL configurations, temporary tables, registered * functions are isolated, but sharing the underlying `SparkContext` and cached data. * * @note Other than the `SparkContext`, all shared state is initialized lazily. * This method will force the initialization of the shared state to ensure that parent * and child sessions are set up with the same shared state. If the underlying catalog * implementation is Hive, this will initialize the metastore, which may take some time. * * @since 2.0.0 */ def newSession(): SparkSession = { new SparkSession( sparkContext, Some(sharedState), parentSessionState = None, extensions, initialSessionOptions) } /** * Create an identical copy of this `SparkSession`, sharing the underlying `SparkContext` * and shared state. All the state of this session (i.e. SQL configurations, temporary tables, * registered functions) is copied over, and the cloned session is set up with the same shared * state as this session. The cloned session is independent of this session, that is, any * non-global change in either session is not reflected in the other. * * @note Other than the `SparkContext`, all shared state is initialized lazily. * This method will force the initialization of the shared state to ensure that parent * and child sessions are set up with the same shared state. If the underlying catalog * implementation is Hive, this will initialize the metastore, which may take some time. */ private[sql] def cloneSession(): SparkSession = { val result = new SparkSession( sparkContext, Some(sharedState), Some(sessionState), extensions, Map.empty) result.sessionState // force copy of SessionState result } /* --------------------------------- * | Methods for creating DataFrames | * --------------------------------- */ /** * Returns a `DataFrame` with no rows or columns. * * @since 2.0.0 */ @transient lazy val emptyDataFrame: DataFrame = Dataset.ofRows(self, LocalRelation()) /** * Creates a new [[Dataset]] of type T containing zero elements. * * @since 2.0.0 */ def emptyDataset[T: Encoder]: Dataset[T] = { val encoder = implicitly[Encoder[T]] new Dataset(self, LocalRelation(encoder.schema), encoder) } /** * Creates a `DataFrame` from an RDD of Product (e.g. case classes, tuples). * * @since 2.0.0 */ def createDataFrame[A <: Product : TypeTag](rdd: RDD[A]): DataFrame = withActive { val encoder = Encoders.product[A] Dataset.ofRows(self, ExternalRDD(rdd, self)(encoder)) } /** * Creates a `DataFrame` from a local Seq of Product. 
* * @since 2.0.0 */ def createDataFrame[A <: Product : TypeTag](data: Seq[A]): DataFrame = withActive { val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType] val attributeSeq = toAttributes(schema) Dataset.ofRows(self, LocalRelation.fromProduct(attributeSeq, data)) } /** * :: DeveloperApi :: * Creates a `DataFrame` from an `RDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * Example: * {{{ * import org.apache.spark.sql._ * import org.apache.spark.sql.types._ * val sparkSession = new org.apache.spark.sql.SparkSession(sc) * * val schema = * StructType( * StructField("name", StringType, false) :: * StructField("age", IntegerType, true) :: Nil) * * val people = * sc.textFile("examples/src/main/resources/people.txt").map( * _.split(",")).map(p => Row(p(0), p(1).trim.toInt)) * val dataFrame = sparkSession.createDataFrame(people, schema) * dataFrame.printSchema * // root * // |-- name: string (nullable = false) * // |-- age: integer (nullable = true) * * dataFrame.createOrReplaceTempView("people") * sparkSession.sql("select name from people").collect.foreach(println) * }}} * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rowRDD: RDD[Row], schema: StructType): DataFrame = withActive { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] // TODO: use MutableProjection when rowRDD is another DataFrame and the applied // schema differs from the existing schema on any field data type. val encoder = ExpressionEncoder(replaced) val toRow = encoder.createSerializer() val catalystRows = rowRDD.map(toRow) internalCreateDataFrame(catalystRows.setName(rowRDD.name), schema) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `JavaRDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rowRDD: JavaRDD[Row], schema: StructType): DataFrame = { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] createDataFrame(rowRDD.rdd, replaced) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `java.util.List` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided List matches * the provided schema. Otherwise, there will be runtime exception. * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rows: java.util.List[Row], schema: StructType): DataFrame = withActive { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] Dataset.ofRows(self, LocalRelation.fromExternalRows(toAttributes(replaced), rows.asScala.toSeq)) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * * @since 2.0.0 */ def createDataFrame(rdd: RDD[_], beanClass: Class[_]): DataFrame = withActive { val attributeSeq: Seq[AttributeReference] = getSchema(beanClass) val className = beanClass.getName val rowRdd = rdd.mapPartitions { iter => // BeanInfo is not serializable so we must rediscover it remotely for each partition. 
SQLContext.beansToRows(iter, Utils.classForName(className), attributeSeq) } Dataset.ofRows(self, LogicalRDD(attributeSeq, rowRdd.setName(rdd.name))(self)) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * * @since 2.0.0 */ def createDataFrame(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame = { createDataFrame(rdd.rdd, beanClass) } /** * Applies a schema to a List of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * @since 1.6.0 */ def createDataFrame(data: java.util.List[_], beanClass: Class[_]): DataFrame = withActive { val attrSeq = getSchema(beanClass) val rows = SQLContext.beansToRows(data.asScala.iterator, beanClass, attrSeq) Dataset.ofRows(self, LocalRelation(attrSeq, rows.toSeq)) } /** * Convert a `BaseRelation` created for external data sources into a `DataFrame`. * * @since 2.0.0 */ def baseRelationToDataFrame(baseRelation: BaseRelation): DataFrame = { Dataset.ofRows(self, LogicalRelation(baseRelation)) } /* ------------------------------- * | Methods for creating DataSets | * ------------------------------- */ /** * Creates a [[Dataset]] from a local Seq of data of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * == Example == * * {{{ * * import spark.implicits._ * case class Person(name: String, age: Long) * val data = Seq(Person("Michael", 29), Person("Andy", 30), Person("Justin", 19)) * val ds = spark.createDataset(data) * * ds.show() * // +-------+---+ * // | name|age| * // +-------+---+ * // |Michael| 29| * // | Andy| 30| * // | Justin| 19| * // +-------+---+ * }}} * * @since 2.0.0 */ def createDataset[T : Encoder](data: Seq[T]): Dataset[T] = { val enc = encoderFor[T] val toRow = enc.createSerializer() val attributes = toAttributes(enc.schema) val encoded = data.map(d => toRow(d).copy()) val plan = new LocalRelation(attributes, encoded) Dataset[T](self, plan) } /** * Creates a [[Dataset]] from an RDD of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * @since 2.0.0 */ def createDataset[T : Encoder](data: RDD[T]): Dataset[T] = { Dataset[T](self, ExternalRDD(data, self)) } /** * Creates a [[Dataset]] from a `java.util.List` of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. 
* * == Java Example == * * {{{ * List<String> data = Arrays.asList("hello", "world"); * Dataset<String> ds = spark.createDataset(data, Encoders.STRING()); * }}} * * @since 2.0.0 */ def createDataset[T : Encoder](data: java.util.List[T]): Dataset[T] = { createDataset(data.asScala.toSeq) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from 0 to `end` (exclusive) with step value 1. * * @since 2.0.0 */ def range(end: Long): Dataset[java.lang.Long] = range(0, end) /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with step value 1. * * @since 2.0.0 */ def range(start: Long, end: Long): Dataset[java.lang.Long] = { range(start, end, step = 1, numPartitions = leafNodeDefaultParallelism) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with a step value. * * @since 2.0.0 */ def range(start: Long, end: Long, step: Long): Dataset[java.lang.Long] = { range(start, end, step, numPartitions = leafNodeDefaultParallelism) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with a step value, with partition number * specified. * * @since 2.0.0 */ def range(start: Long, end: Long, step: Long, numPartitions: Int): Dataset[java.lang.Long] = { new Dataset(self, Range(start, end, step, numPartitions), Encoders.LONG) } /** * Creates a `DataFrame` from an `RDD[InternalRow]`. */ private[sql] def internalCreateDataFrame( catalystRows: RDD[InternalRow], schema: StructType, isStreaming: Boolean = false): DataFrame = { // TODO: use MutableProjection when rowRDD is another DataFrame and the applied // schema differs from the existing schema on any field data type. val logicalPlan = LogicalRDD( toAttributes(schema), catalystRows, isStreaming = isStreaming)(self) Dataset.ofRows(self, logicalPlan) } /* ------------------------- * | Catalog-related methods | * ------------------------- */ /** * Interface through which the user may create, drop, alter or query underlying * databases, tables, functions etc. * * @since 2.0.0 */ @transient lazy val catalog: Catalog = new CatalogImpl(self) /** * Returns the specified table/view as a `DataFrame`. If it's a table, it must support batch * reading and the returned DataFrame is the batch scan query plan of this table. If it's a view, * the returned DataFrame is simply the query plan of the view, which can either be a batch or * streaming query plan. * * @param tableName is either a qualified or unqualified name that designates a table or view. * If a database is specified, it identifies the table/view from the database. * Otherwise, it first attempts to find a temporary view with the given name * and then match the table/view from the current database. * Note that, the global temporary view database is also valid here. * @since 2.0.0 */ def table(tableName: String): DataFrame = { read.table(tableName) } private[sql] def table(tableIdent: TableIdentifier): DataFrame = { Dataset.ofRows(self, UnresolvedRelation(tableIdent)) } /* ----------------- * | Everything else | * ----------------- */ /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. 
* @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql(sqlText: String, args: Array[_], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { PosParameterizedQuery(parsedPlan, args.map(lit(_).expr)) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. * @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.5.0 */ @Experimental def sql(sqlText: String, args: Array[_]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql( sqlText: String, args: Map[String, Any], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { NameParameterizedQuery(parsedPlan, args.mapValues(lit(_).expr).toMap) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. 
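 *
 * A hedged usage sketch (the view name `people` and the parameter name `minAge` are
 * illustrative assumptions, not part of this API):
 * {{{
 *   spark.sql("SELECT name FROM people WHERE age > :minAge", Map("minAge" -> 21)).show()
 * }}}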
* * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: Map[String, Any]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: java.util.Map[String, Any]): DataFrame = { sql(sqlText, args.asScala.toMap) } /** * Executes a SQL query using Spark, returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @since 2.0.0 */ def sql(sqlText: String): DataFrame = sql(sqlText, Map.empty[String, Any]) /** * Execute an arbitrary string command inside an external execution engine rather than Spark. * This could be useful when user wants to execute some commands out of Spark. For * example, executing custom DDL/DML command for JDBC, creating index for ElasticSearch, * creating cores for Solr and so on. * * The command will be eagerly executed after this method is called and the returned * DataFrame will contain the output of the command(if any). * * @param runner The class name of the runner that implements `ExternalCommandRunner`. * @param command The target command to be executed * @param options The options for the runner. * * @since 3.0.0 */ @Unstable def executeCommand(runner: String, command: String, options: Map[String, String]): DataFrame = { DataSource.lookupDataSource(runner, sessionState.conf) match { case source if classOf[ExternalCommandRunner].isAssignableFrom(source) => Dataset.ofRows(self, ExternalCommandExecutor( source.newInstance().asInstanceOf[ExternalCommandRunner], command, options)) case _ => throw QueryCompilationErrors.commandExecutionInRunnerUnsupportedError(runner) } } /** * Returns a [[DataFrameReader]] that can be used to read non-streaming data in as a * `DataFrame`. * {{{ * sparkSession.read.parquet("/path/to/file.parquet") * sparkSession.read.schema(schema).json("/path/to/file.json") * }}} * * @since 2.0.0 */ def read: DataFrameReader = new DataFrameReader(self) /** * Returns a `DataStreamReader` that can be used to read streaming data in as a `DataFrame`. * {{{ * sparkSession.readStream.parquet("/path/to/directory/of/parquet/files") * sparkSession.readStream.schema(schema).json("/path/to/directory/of/json/files") * }}} * * @since 2.0.0 */ def readStream: DataStreamReader = new DataStreamReader(self) /** * Executes some code block and prints to stdout the time taken to execute the block. This is * available in Scala only and is used primarily for interactive testing and debugging. 
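 *
 * For example (an illustrative sketch; the reported time will vary):
 * {{{
 *   val n = spark.time(spark.range(0, 1000000).count())
 *   // Time taken: 123 ms
 * }}}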
 * * @since 2.1.0 */ def time[T](f: => T): T = { val start = System.nanoTime() val ret = f val end = System.nanoTime() // scalastyle:off println println(s"Time taken: ${NANOSECONDS.toMillis(end - start)} ms") // scalastyle:on println ret } // scalastyle:off // Disable style checker so "implicits" object can start with lowercase i /** * (Scala-specific) Implicit methods available in Scala for converting * common Scala objects into `DataFrame`s. * * {{{ * val sparkSession = SparkSession.builder.getOrCreate() * import sparkSession.implicits._ * }}} * * @since 2.0.0 */ object implicits extends SQLImplicits with Serializable { protected override def _sqlContext: SQLContext = SparkSession.this.sqlContext } // scalastyle:on /** * Stop the underlying `SparkContext`. * * @since 2.0.0 */ def stop(): Unit = { sparkContext.stop() } /** * Synonym for `stop()`. * * @since 2.1.0 */ override def close(): Unit = stop() /** * Parses the data type in our internal string representation. The data type string should * have the same format as the one generated by `toString` in Scala. * It is only used by PySpark. */ protected[sql] def parseDataType(dataTypeString: String): DataType = { DataType.fromJson(dataTypeString) } /** * Apply a schema defined by the schemaString to an RDD. It is only used by PySpark. */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schemaString: String): DataFrame = { val schema = DataType.fromJson(schemaString).asInstanceOf[StructType] applySchemaToPythonRDD(rdd, schema) } /** * Apply `schema` to an RDD. * * @note Used by PySpark only */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schema: StructType): DataFrame = { val rowRdd = rdd.mapPartitions { iter => val fromJava = python.EvaluatePython.makeFromJava(schema) iter.map(r => fromJava(r).asInstanceOf[InternalRow]) } internalCreateDataFrame(rowRdd, schema) } /** * Returns a Catalyst schema for the given Java bean class. */ private def getSchema(beanClass: Class[_]): Seq[AttributeReference] = { val (dataType, _) = JavaTypeInference.inferDataType(beanClass) dataType.asInstanceOf[StructType].fields.map { f => AttributeReference(f.name, f.dataType, f.nullable)() } } /** * Execute a block of code with this session set as the active session, and restore the * previous session on completion. */ private[sql] def withActive[T](block: => T): T = { // Use the active session thread local directly to make sure we get the session that is actually // set and not the default session. This is to prevent promoting the default session to the // active session once we are done. val old = SparkSession.activeThreadSession.get() SparkSession.setActiveSession(this) try block finally { SparkSession.setActiveSession(old) } } private[sql] def leafNodeDefaultParallelism: Int = { conf.get(SQLConf.LEAF_NODE_DEFAULT_PARALLELISM).getOrElse(sparkContext.defaultParallelism) }} @Stable object SparkSession extends Logging { /** * Builder for [[SparkSession]]. */ @Stable class Builder extends Logging { private[this] val options = new scala.collection.mutable.HashMap[String, String] private[this] val extensions = new SparkSessionExtensions private[this] var userSuppliedContext: Option[SparkContext] = None private[spark] def sparkContext(sparkContext: SparkContext): Builder = synchronized { userSuppliedContext = Option(sparkContext) this } /** * Sets a name for the application, which will be shown in the Spark web UI. * If no application name is set, a randomly generated name will be used.
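 *
 * Typically chained with the other builder methods, for example (illustrative values only):
 * {{{
 *   SparkSession.builder().master("local[*]").appName("My Application").getOrCreate()
 * }}}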
* * @since 2.0.0 */ def appName(name: String): Builder = config("spark.app.name", name) /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: String): Builder = synchronized { options += key -> value this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Long): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Double): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Boolean): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: Map[String, Any]): Builder = synchronized { map.foreach { kv: (String, Any) => { options += kv._1 -> kv._2.toString } } this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: java.util.Map[String, Any]): Builder = synchronized { config(map.asScala.toMap) } /** * Sets a list of config options based on the given `SparkConf`. * * @since 2.0.0 */ def config(conf: SparkConf): Builder = synchronized { conf.getAll.foreach { case (k, v) => options += k -> v } this } /** * Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]" to * run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster. * * @since 2.0.0 */ def master(master: String): Builder = config("spark.master", master) /** * Enables Hive support, including connectivity to a persistent Hive metastore, support for * Hive serdes, and Hive user-defined functions. * * @since 2.0.0 */ def enableHiveSupport(): Builder = synchronized { if (hiveClassesArePresent) { config(CATALOG_IMPLEMENTATION.key, "hive") } else { throw new IllegalArgumentException( "Unable to instantiate SparkSession with Hive support because " + "Hive classes are not found.") } } /** * Inject extensions into the [[SparkSession]]. This allows a user to add Analyzer rules, * Optimizer rules, Planning Strategies or a customized parser. * * @since 2.2.0 */ def withExtensions(f: SparkSessionExtensions => Unit): Builder = synchronized { f(extensions) this } /** * Gets an existing [[SparkSession]] or, if there is no existing one, creates a new * one based on the options set in this builder. * * This method first checks whether there is a valid thread-local SparkSession, * and if yes, return that one. It then checks whether there is a valid global * default SparkSession, and if yes, return that one. If no valid global default * SparkSession exists, the method creates a new SparkSession and assigns the * newly created SparkSession as the global default. 
* * In case an existing SparkSession is returned, the non-static config options specified in * this builder will be applied to the existing SparkSession. * * @since 2.0.0 */ def getOrCreate(): SparkSession = synchronized { val sparkConf = new SparkConf() options.foreach { case (k, v) => sparkConf.set(k, v) } if (!sparkConf.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { assertOnDriver() } // Get the session from current thread's active session. var session = activeThreadSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // Global synchronization so we will only set the default session once. SparkSession.synchronized { // If the current thread does not have an active session, get it from the global session. session = defaultSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // No active nor global default session. Create a new one. val sparkContext = userSuppliedContext.getOrElse { // set a random app name if not given. if (!sparkConf.contains("spark.app.name")) { sparkConf.setAppName(java.util.UUID.randomUUID().toString) } SparkContext.getOrCreate(sparkConf) // Do not update `SparkConf` for existing `SparkContext`, as it's shared by all sessions. } loadExtensions(extensions) applyExtensions(sparkContext, extensions) session = new SparkSession(sparkContext, None, None, extensions, options.toMap) setDefaultSession(session) setActiveSession(session) registerContextListener(sparkContext) } return session } } /** * Creates a [[SparkSession.Builder]] for constructing a [[SparkSession]]. * * @since 2.0.0 */ def builder(): Builder = new Builder /** * Changes the SparkSession that will be returned in this thread and its children when * SparkSession.getOrCreate() is called. This can be used to ensure that a given thread receives * a SparkSession with an isolated session, instead of the global (first created) context. * * @since 2.0.0 */ def setActiveSession(session: SparkSession): Unit = { activeThreadSession.set(session) } /** * Clears the active SparkSession for current thread. Subsequent calls to getOrCreate will * return the first created context instead of a thread-local override. * * @since 2.0.0 */ def clearActiveSession(): Unit = { activeThreadSession.remove() } /** * Sets the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def setDefaultSession(session: SparkSession): Unit = { defaultSession.set(session) } /** * Clears the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def clearDefaultSession(): Unit = { defaultSession.set(null) } /** * Returns the active SparkSession for the current thread, returned by the builder. * * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getActiveSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(activeThreadSession.get) } } /** * Returns the default SparkSession that is returned by the builder. * * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getDefaultSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(defaultSession.get) } } /** * Returns the currently active SparkSession, otherwise the default one. 
If there is no default * SparkSession, throws an exception. * * @since 2.4.0 */ def active: SparkSession = { getActiveSession.getOrElse(getDefaultSession.getOrElse( throw new IllegalStateException("No active or default Spark session found"))) } /** * Apply modifiable settings to an existing [[SparkSession]]. This method is used * in both Scala and Python, so it is kept under the [[SparkSession]] object. */ private[sql] def applyModifiableSettings( session: SparkSession, options: java.util.HashMap[String, String]): Unit = { // Lazy val to avoid an unnecessary session state initialization lazy val conf = session.sessionState.conf val dedupOptions = if (options.isEmpty) Map.empty[String, String] else ( options.asScala.toSet -- conf.getAllConfs.toSet).toMap val (staticConfs, otherConfs) = dedupOptions.partition(kv => SQLConf.isStaticConfigKey(kv._1)) otherConfs.foreach { case (k, v) => conf.setConfString(k, v) } // Note that other runtime SQL options, for example, for other third-party datasource // can be marked as an ignored configuration here. val maybeIgnoredConfs = otherConfs.filterNot { case (k, _) => conf.isModifiable(k) } if (staticConfs.nonEmpty || maybeIgnoredConfs.nonEmpty) { logWarning( "Using an existing Spark session; only runtime SQL configurations will take effect.") } if (staticConfs.nonEmpty) { logDebug("Ignored static SQL configurations:\n " + conf.redactOptions(staticConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } if (maybeIgnoredConfs.nonEmpty) { // Only print out non-static and non-runtime SQL configurations. // Note that this might show core configurations or source specific // options defined in the third-party datasource. logDebug("Configurations that might not take effect:\n " + conf.redactOptions( maybeIgnoredConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } } /** * Returns a cloned SparkSession with all specified configurations disabled, or * the original SparkSession if all configurations are already disabled. */ private[sql] def getOrCloneSessionWithConfigsOff( session: SparkSession, configurations: Seq[ConfigEntry[Boolean]]): SparkSession = { val configsEnabled = configurations.filter(session.conf.get[Boolean]) if (configsEnabled.isEmpty) { session } else { val newSession = session.cloneSession() configsEnabled.foreach(conf => { newSession.conf.set(conf, false) }) newSession } } //////////////////////////////////////////////////////////////////////////////////////// // Private methods from now on //////////////////////////////////////////////////////////////////////////////////////// private val listenerRegistered: AtomicBoolean = new AtomicBoolean(false) /** Register the AppEnd listener onto the Context */ private def registerContextListener(sparkContext: SparkContext): Unit = { if (!listenerRegistered.get()) { sparkContext.addSparkListener(new SparkListener { override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = { defaultSession.set(null) listenerRegistered.set(false) } }) listenerRegistered.set(true) } } /** The active SparkSession for the current thread. */ private val activeThreadSession = new InheritableThreadLocal[SparkSession] /** Reference to the root SparkSession.
*/ private val defaultSession = new AtomicReference[SparkSession] private val HIVE_SESSION_STATE_BUILDER_CLASS_NAME = "org.apache.spark.sql.hive.HiveSessionStateBuilder" private def sessionStateClassName(conf: SparkConf): String = { conf.get(CATALOG_IMPLEMENTATION) match { case "hive" => HIVE_SESSION_STATE_BUILDER_CLASS_NAME case "in-memory" => classOf[SessionStateBuilder].getCanonicalName } } private def assertOnDriver(): Unit = { if (TaskContext.get != null) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkSession should only be created and accessed on the driver.") } } /** * Helper method to create an instance of `SessionState` based on `className` from conf. * The result is either `SessionState` or a Hive based `SessionState`. */ private def instantiateSessionState( className: String, sparkSession: SparkSession): SessionState = { try { // invoke new [Hive]SessionStateBuilder( // SparkSession, // Option[SessionState]) val clazz = Utils.classForName(className) val ctor = clazz.getConstructors.head ctor.newInstance(sparkSession, None).asInstanceOf[BaseSessionStateBuilder].build() } catch { case NonFatal(e) => throw new IllegalArgumentException(s"Error while instantiating '$className':", e) } } /** * @return true if Hive classes can be loaded, otherwise false. */ private[spark] def hiveClassesArePresent: Boolean = { try { Utils.classForName(HIVE_SESSION_STATE_BUILDER_CLASS_NAME) Utils.classForName("org.apache.hadoop.hive.conf.HiveConf") true } catch { case _: ClassNotFoundException | _: NoClassDefFoundError => false } } private[spark] def cleanupAnyExistingSession(): Unit = { val session = getActiveSession.orElse(getDefaultSession) if (session.isDefined) { logWarning( s"""An existing Spark session exists as the active or default session. |This probably means another suite leaked it. Attempting to stop it before continuing. |This existing Spark session was created at: | |${session.get.creationSite.longForm} | """.stripMargin) session.get.stop() SparkSession.clearActiveSession() SparkSession.clearDefaultSession() } } /** * Initialize extensions specified in [[StaticSQLConf]]. The classes will be applied to the * extensions passed into this function. */ private def applyExtensions( sparkContext: SparkContext, extensions: SparkSessionExtensions): SparkSessionExtensions = { val extensionConfClassNames = sparkContext.getConf.get(StaticSQLConf.SPARK_SESSION_EXTENSIONS) .getOrElse(Seq.empty) extensionConfClassNames.foreach { extensionConfClassName => try { val extensionConfClass = Utils.classForName(extensionConfClassName) val extensionConf = extensionConfClass.getConstructor().newInstance() .asInstanceOf[SparkSessionExtensions => Unit] extensionConf(extensions) } catch { // Ignore the error if we cannot find the class or when the class has the wrong type. 
case e@(_: ClassCastException | _: ClassNotFoundException | _: NoClassDefFoundError) => logWarning(s"Cannot use $extensionConfClassName to configure session extensions.", e) } } extensions } /** * Load extensions from [[ServiceLoader]] and use them */ private def loadExtensions(extensions: SparkSessionExtensions): Unit = { val loader = ServiceLoader.load(classOf[SparkSessionExtensionsProvider], Utils.getContextOrSparkClassLoader) val loadedExts = loader.iterator() while (loadedExts.hasNext) { try { val ext = loadedExts.next() ext(extensions) } catch { case e: Throwable => logWarning("Failed to load session extension", e) } } }}
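
// Illustrative end-to-end sketch, not part of Spark's source: it assumes a local master and a
// hypothetical temp view named "people", and only ties together the builder, named-parameter
// sql (Spark 3.4+) and stop APIs defined above.
object SparkSessionUsageExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("SparkSessionUsageExample")
      .getOrCreate()
    import spark.implicits._

    // Build a tiny DataFrame and expose it to SQL as a temporary view.
    val people = Seq(("Ann", 25L), ("Bob", 32L)).toDF("name", "age")
    people.createOrReplaceTempView("people")

    // Substitute the named parameter :minAge with a literal value.
    spark.sql("SELECT name FROM people WHERE age > :minAge", Map("minAge" -> 30)).show()

    spark.stop()
  }
}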
REPL, notebooks), use the builder * to get an existing session: * * {{{ * SparkSession.builder().getOrCreate() * }}} * * The builder can also be used to create a new session: * * {{{ * SparkSession.builder * .master("local") * .appName("Word Count") * .config("spark.some.config.option", "some-value") * .getOrCreate() * }}} * * @param sparkContext The Spark context associated with this Spark session. * @param existingSharedState If supplied, use the existing shared state * instead of creating a new one. * @param parentSessionState If supplied, inherit all session state (i.e. temporary * views, SQL config, UDFs etc) from parent. */@Stableclass SparkSession private( @transient val sparkContext: SparkContext, @transient private val existingSharedState: Option[SharedState], @transient private val parentSessionState: Option[SessionState], @transient private[sql] val extensions: SparkSessionExtensions, @transient private[sql] val initialSessionOptions: Map[String, String]) extends Serializable with Closeable with Logging { self => // The call site where this SparkSession was constructed. private val creationSite: CallSite = Utils.getCallSite() /** * Constructor used in Pyspark. Contains explicit application of Spark Session Extensions * which otherwise only occurs during getOrCreate. We cannot add this to the default constructor * since that would cause every new session to reinvoke Spark Session Extensions on the currently * running extensions. */ private[sql] def this( sc: SparkContext, initialSessionOptions: java.util.HashMap[String, String]) = { this(sc, None, None, SparkSession.applyExtensions(sc, new SparkSessionExtensions), initialSessionOptions.asScala.toMap) } private[sql] def this(sc: SparkContext) = this(sc, new java.util.HashMap[String, String]()) private[sql] val sessionUUID: String = UUID.randomUUID.toString sparkContext.assertNotStopped() // If there is no active SparkSession, uses the default SQL conf. Otherwise, use the session's. SQLConf.setSQLConfGetter(() => { SparkSession.getActiveSession.filterNot(_.sparkContext.isStopped).map(_.sessionState.conf) .getOrElse(SQLConf.getFallbackConf) }) /** * The version of Spark on which this application is running. * * @since 2.0.0 */ def version: String = SPARK_VERSION /* ----------------------- * | Session-related state | * ----------------------- */ /** * State shared across sessions, including the `SparkContext`, cached data, listener, * and a catalog that interacts with external systems. * * This is internal to Spark and there is no guarantee on interface stability. * * @since 2.2.0 */ @Unstable @transient lazy val sharedState: SharedState = { existingSharedState.getOrElse(new SharedState(sparkContext, initialSessionOptions)) } /** * State isolated across sessions, including SQL configurations, temporary tables, registered * functions, and everything else that accepts a [[org.apache.spark.sql.internal.SQLConf]]. * If `parentSessionState` is not null, the `SessionState` will be a copy of the parent. * * This is internal to Spark and there is no guarantee on interface stability. * * @since 2.2.0 */ @Unstable @transient lazy val sessionState: SessionState = { parentSessionState .map(_.clone(this)) .getOrElse { val state = SparkSession.instantiateSessionState( SparkSession.sessionStateClassName(sharedState.conf), self) state } } /** * A wrapped version of this session in the form of a [[SQLContext]], for backward compatibility. 
* * @since 2.0.0 */ @transient val sqlContext: SQLContext = new SQLContext(this) /** * Runtime configuration interface for Spark. * * This is the interface through which the user can get and set all Spark and Hadoop * configurations that are relevant to Spark SQL. When getting the value of a config, * this defaults to the value set in the underlying `SparkContext`, if any. * * @since 2.0.0 */ @transient lazy val conf: RuntimeConfig = new RuntimeConfig(sessionState.conf) /** * An interface to register custom [[org.apache.spark.sql.util.QueryExecutionListener]]s * that listen for execution metrics. * * @since 2.0.0 */ def listenerManager: ExecutionListenerManager = sessionState.listenerManager /** * :: Experimental :: * A collection of methods that are considered experimental, but can be used to hook into * the query planner for advanced functionality. * * @since 2.0.0 */ @Experimental @Unstable def experimental: ExperimentalMethods = sessionState.experimentalMethods /** * A collection of methods for registering user-defined functions (UDF). * * The following example registers a Scala closure as UDF: * {{{ * sparkSession.udf.register("myUDF", (arg1: Int, arg2: String) => arg2 + arg1) * }}} * * The following example registers a UDF in Java: * {{{ * sparkSession.udf().register("myUDF", * (Integer arg1, String arg2) -> arg2 + arg1, * DataTypes.StringType); * }}} * * @note The user-defined functions must be deterministic. Due to optimization, * duplicate invocations may be eliminated or the function may even be invoked more times than * it is present in the query. * * @since 2.0.0 */ def udf: UDFRegistration = sessionState.udfRegistration def udtf: UDTFRegistration = sessionState.udtfRegistration /** * Returns a `StreamingQueryManager` that allows managing all the * `StreamingQuery`s active on `this`. * * @since 2.0.0 */ @Unstable def streams: StreamingQueryManager = sessionState.streamingQueryManager /** * Start a new session with isolated SQL configurations, temporary tables, registered * functions are isolated, but sharing the underlying `SparkContext` and cached data. * * @note Other than the `SparkContext`, all shared state is initialized lazily. * This method will force the initialization of the shared state to ensure that parent * and child sessions are set up with the same shared state. If the underlying catalog * implementation is Hive, this will initialize the metastore, which may take some time. * * @since 2.0.0 */ def newSession(): SparkSession = { new SparkSession( sparkContext, Some(sharedState), parentSessionState = None, extensions, initialSessionOptions) } /** * Create an identical copy of this `SparkSession`, sharing the underlying `SparkContext` * and shared state. All the state of this session (i.e. SQL configurations, temporary tables, * registered functions) is copied over, and the cloned session is set up with the same shared * state as this session. The cloned session is independent of this session, that is, any * non-global change in either session is not reflected in the other. * * @note Other than the `SparkContext`, all shared state is initialized lazily. * This method will force the initialization of the shared state to ensure that parent * and child sessions are set up with the same shared state. If the underlying catalog * implementation is Hive, this will initialize the metastore, which may take some time. 
*/ private[sql] def cloneSession(): SparkSession = { val result = new SparkSession( sparkContext, Some(sharedState), Some(sessionState), extensions, Map.empty) result.sessionState // force copy of SessionState result } /* --------------------------------- * | Methods for creating DataFrames | * --------------------------------- */ /** * Returns a `DataFrame` with no rows or columns. * * @since 2.0.0 */ @transient lazy val emptyDataFrame: DataFrame = Dataset.ofRows(self, LocalRelation()) /** * Creates a new [[Dataset]] of type T containing zero elements. * * @since 2.0.0 */ def emptyDataset[T: Encoder]: Dataset[T] = { val encoder = implicitly[Encoder[T]] new Dataset(self, LocalRelation(encoder.schema), encoder) } /** * Creates a `DataFrame` from an RDD of Product (e.g. case classes, tuples). * * @since 2.0.0 */ def createDataFrame[A <: Product : TypeTag](rdd: RDD[A]): DataFrame = withActive { val encoder = Encoders.product[A] Dataset.ofRows(self, ExternalRDD(rdd, self)(encoder)) } /** * Creates a `DataFrame` from a local Seq of Product. * * @since 2.0.0 */ def createDataFrame[A <: Product : TypeTag](data: Seq[A]): DataFrame = withActive { val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType] val attributeSeq = toAttributes(schema) Dataset.ofRows(self, LocalRelation.fromProduct(attributeSeq, data)) } /** * :: DeveloperApi :: * Creates a `DataFrame` from an `RDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * Example: * {{{ * import org.apache.spark.sql._ * import org.apache.spark.sql.types._ * val sparkSession = new org.apache.spark.sql.SparkSession(sc) * * val schema = * StructType( * StructField("name", StringType, false) :: * StructField("age", IntegerType, true) :: Nil) * * val people = * sc.textFile("examples/src/main/resources/people.txt").map( * _.split(",")).map(p => Row(p(0), p(1).trim.toInt)) * val dataFrame = sparkSession.createDataFrame(people, schema) * dataFrame.printSchema * // root * // |-- name: string (nullable = false) * // |-- age: integer (nullable = true) * * dataFrame.createOrReplaceTempView("people") * sparkSession.sql("select name from people").collect.foreach(println) * }}} * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rowRDD: RDD[Row], schema: StructType): DataFrame = withActive { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] // TODO: use MutableProjection when rowRDD is another DataFrame and the applied // schema differs from the existing schema on any field data type. val encoder = ExpressionEncoder(replaced) val toRow = encoder.createSerializer() val catalystRows = rowRDD.map(toRow) internalCreateDataFrame(catalystRows.setName(rowRDD.name), schema) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `JavaRDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rowRDD: JavaRDD[Row], schema: StructType): DataFrame = { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] createDataFrame(rowRDD.rdd, replaced) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `java.util.List` containing [[Row]]s using the given schema. 
* It is important to make sure that the structure of every [[Row]] of the provided List matches * the provided schema. Otherwise, there will be runtime exception. * * @since 2.0.0 */ @DeveloperApi def createDataFrame(rows: java.util.List[Row], schema: StructType): DataFrame = withActive { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] Dataset.ofRows(self, LocalRelation.fromExternalRows(toAttributes(replaced), rows.asScala.toSeq)) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * * @since 2.0.0 */ def createDataFrame(rdd: RDD[_], beanClass: Class[_]): DataFrame = withActive { val attributeSeq: Seq[AttributeReference] = getSchema(beanClass) val className = beanClass.getName val rowRdd = rdd.mapPartitions { iter => // BeanInfo is not serializable so we must rediscover it remotely for each partition. SQLContext.beansToRows(iter, Utils.classForName(className), attributeSeq) } Dataset.ofRows(self, LogicalRDD(attributeSeq, rowRdd.setName(rdd.name))(self)) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * * @since 2.0.0 */ def createDataFrame(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame = { createDataFrame(rdd.rdd, beanClass) } /** * Applies a schema to a List of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * @since 1.6.0 */ def createDataFrame(data: java.util.List[_], beanClass: Class[_]): DataFrame = withActive { val attrSeq = getSchema(beanClass) val rows = SQLContext.beansToRows(data.asScala.iterator, beanClass, attrSeq) Dataset.ofRows(self, LocalRelation(attrSeq, rows.toSeq)) } /** * Convert a `BaseRelation` created for external data sources into a `DataFrame`. * * @since 2.0.0 */ def baseRelationToDataFrame(baseRelation: BaseRelation): DataFrame = { Dataset.ofRows(self, LogicalRelation(baseRelation)) } /* ------------------------------- * | Methods for creating DataSets | * ------------------------------- */ /** * Creates a [[Dataset]] from a local Seq of data of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * == Example == * * {{{ * * import spark.implicits._ * case class Person(name: String, age: Long) * val data = Seq(Person("Michael", 29), Person("Andy", 30), Person("Justin", 19)) * val ds = spark.createDataset(data) * * ds.show() * // +-------+---+ * // | name|age| * // +-------+---+ * // |Michael| 29| * // | Andy| 30| * // | Justin| 19| * // +-------+---+ * }}} * * @since 2.0.0 */ def createDataset[T : Encoder](data: Seq[T]): Dataset[T] = { val enc = encoderFor[T] val toRow = enc.createSerializer() val attributes = toAttributes(enc.schema) val encoded = data.map(d => toRow(d).copy()) val plan = new LocalRelation(attributes, encoded) Dataset[T](self, plan) } /** * Creates a [[Dataset]] from an RDD of a given type. 
This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * @since 2.0.0 */ def createDataset[T : Encoder](data: RDD[T]): Dataset[T] = { Dataset[T](self, ExternalRDD(data, self)) } /** * Creates a [[Dataset]] from a `java.util.List` of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * == Java Example == * * {{{ * List<String> data = Arrays.asList("hello", "world"); * Dataset<String> ds = spark.createDataset(data, Encoders.STRING()); * }}} * * @since 2.0.0 */ def createDataset[T : Encoder](data: java.util.List[T]): Dataset[T] = { createDataset(data.asScala.toSeq) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from 0 to `end` (exclusive) with step value 1. * * @since 2.0.0 */ def range(end: Long): Dataset[java.lang.Long] = range(0, end) /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with step value 1. * * @since 2.0.0 */ def range(start: Long, end: Long): Dataset[java.lang.Long] = { range(start, end, step = 1, numPartitions = leafNodeDefaultParallelism) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with a step value. * * @since 2.0.0 */ def range(start: Long, end: Long, step: Long): Dataset[java.lang.Long] = { range(start, end, step, numPartitions = leafNodeDefaultParallelism) } /** * Creates a [[Dataset]] with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with a step value, with partition number * specified. * * @since 2.0.0 */ def range(start: Long, end: Long, step: Long, numPartitions: Int): Dataset[java.lang.Long] = { new Dataset(self, Range(start, end, step, numPartitions), Encoders.LONG) } /** * Creates a `DataFrame` from an `RDD[InternalRow]`. */ private[sql] def internalCreateDataFrame( catalystRows: RDD[InternalRow], schema: StructType, isStreaming: Boolean = false): DataFrame = { // TODO: use MutableProjection when rowRDD is another DataFrame and the applied // schema differs from the existing schema on any field data type. val logicalPlan = LogicalRDD( toAttributes(schema), catalystRows, isStreaming = isStreaming)(self) Dataset.ofRows(self, logicalPlan) } /* ------------------------- * | Catalog-related methods | * ------------------------- */ /** * Interface through which the user may create, drop, alter or query underlying * databases, tables, functions etc. * * @since 2.0.0 */ @transient lazy val catalog: Catalog = new CatalogImpl(self) /** * Returns the specified table/view as a `DataFrame`. If it's a table, it must support batch * reading and the returned DataFrame is the batch scan query plan of this table. If it's a view, * the returned DataFrame is simply the query plan of the view, which can either be a batch or * streaming query plan. * * @param tableName is either a qualified or unqualified name that designates a table or view. * If a database is specified, it identifies the table/view from the database. 
* Otherwise, it first attempts to find a temporary view with the given name * and then match the table/view from the current database. * Note that, the global temporary view database is also valid here. * @since 2.0.0 */ def table(tableName: String): DataFrame = { read.table(tableName) } private[sql] def table(tableIdent: TableIdentifier): DataFrame = { Dataset.ofRows(self, UnresolvedRelation(tableIdent)) } /* ----------------- * | Everything else | * ----------------- */ /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. * @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql(sqlText: String, args: Array[_], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { PosParameterizedQuery(parsedPlan, args.map(lit(_).expr)) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting positional parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with positional parameters to execute. * @param args An array of Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, 1, "Steven", LocalDate.of(2023, 4, 2). * A value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.5.0 */ @Experimental def sql(sqlText: String, args: Array[_]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. 
* @param tracker A tracker that can notify when query is ready for execution */ private[sql] def sql( sqlText: String, args: Map[String, Any], tracker: QueryPlanningTracker): DataFrame = withActive { val plan = tracker.measurePhase(QueryPlanningTracker.PARSING) { val parsedPlan = sessionState.sqlParser.parsePlan(sqlText) if (args.nonEmpty) { NameParameterizedQuery(parsedPlan, args.mapValues(lit(_).expr).toMap) } else { parsedPlan } } Dataset.ofRows(self, plan, tracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: Map[String, Any]): DataFrame = { sql(sqlText, args, new QueryPlanningTracker) } /** * Executes a SQL query substituting named parameters by the given arguments, * returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @param sqlText A SQL statement with named parameters to execute. * @param args A map of parameter names to Java/Scala objects that can be converted to * SQL literal expressions. See * <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> * Supported Data Types</a> for supported value types in Scala/Java. * For example, map keys: "rank", "name", "birthdate"; * map values: 1, "Steven", LocalDate.of(2023, 4, 2). * Map value can be also a `Column` of literal expression, in that case * it is taken as is. * * @since 3.4.0 */ @Experimental def sql(sqlText: String, args: java.util.Map[String, Any]): DataFrame = { sql(sqlText, args.asScala.toMap) } /** * Executes a SQL query using Spark, returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @since 2.0.0 */ def sql(sqlText: String): DataFrame = sql(sqlText, Map.empty[String, Any]) /** * Execute an arbitrary string command inside an external execution engine rather than Spark. * This could be useful when user wants to execute some commands out of Spark. For * example, executing custom DDL/DML command for JDBC, creating index for ElasticSearch, * creating cores for Solr and so on. * * The command will be eagerly executed after this method is called and the returned * DataFrame will contain the output of the command(if any). * * @param runner The class name of the runner that implements `ExternalCommandRunner`. * @param command The target command to be executed * @param options The options for the runner. 
* * @since 3.0.0 */ @Unstable def executeCommand(runner: String, command: String, options: Map[String, String]): DataFrame = { DataSource.lookupDataSource(runner, sessionState.conf) match { case source if classOf[ExternalCommandRunner].isAssignableFrom(source) => Dataset.ofRows(self, ExternalCommandExecutor( source.newInstance().asInstanceOf[ExternalCommandRunner], command, options)) case _ => throw QueryCompilationErrors.commandExecutionInRunnerUnsupportedError(runner) } } /** * Returns a [[DataFrameReader]] that can be used to read non-streaming data in as a * `DataFrame`. * {{{ * sparkSession.read.parquet("/path/to/file.parquet") * sparkSession.read.schema(schema).json("/path/to/file.json") * }}} * * @since 2.0.0 */ def read: DataFrameReader = new DataFrameReader(self) /** * Returns a `DataStreamReader` that can be used to read streaming data in as a `DataFrame`. * {{{ * sparkSession.readStream.parquet("/path/to/directory/of/parquet/files") * sparkSession.readStream.schema(schema).json("/path/to/directory/of/json/files") * }}} * * @since 2.0.0 */ def readStream: DataStreamReader = new DataStreamReader(self) /** * Executes some code block and prints to stdout the time taken to execute the block. This is * available in Scala only and is used primarily for interactive testing and debugging. * * @since 2.1.0 */ def time[T](f: => T): T = { val start = System.nanoTime() val ret = f val end = System.nanoTime() // scalastyle:off println println(s"Time taken: ${NANOSECONDS.toMillis(end - start)} ms") // scalastyle:on println ret } // scalastyle:off // Disable style checker so "implicits" object can start with lowercase i /** * (Scala-specific) Implicit methods available in Scala for converting * common Scala objects into `DataFrame`s. * * {{{ * val sparkSession = SparkSession.builder.getOrCreate() * import sparkSession.implicits._ * }}} * * @since 2.0.0 */ object implicits extends SQLImplicits with Serializable { protected override def _sqlContext: SQLContext = SparkSession.this.sqlContext } // scalastyle:on /** * Stop the underlying `SparkContext`. * * @since 2.0.0 */ def stop(): Unit = { sparkContext.stop() } /** * Synonym for `stop()`. * * @since 2.1.0 */ override def close(): Unit = stop() /** * Parses the data type in our internal string representation. The data type string should * have the same format as the one generated by `toString` in scala. * It is only used by PySpark. */ protected[sql] def parseDataType(dataTypeString: String): DataType = { DataType.fromJson(dataTypeString) } /** * Apply a schema defined by the schemaString to an RDD. It is only used by PySpark. */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schemaString: String): DataFrame = { val schema = DataType.fromJson(schemaString).asInstanceOf[StructType] applySchemaToPythonRDD(rdd, schema) } /** * Apply `schema` to an RDD. * * @note Used by PySpark only */ private[sql] def applySchemaToPythonRDD( rdd: RDD[Array[Any]], schema: StructType): DataFrame = { val rowRdd = rdd.mapPartitions { iter => val fromJava = python.EvaluatePython.makeFromJava(schema) iter.map(r => fromJava(r).asInstanceOf[InternalRow]) } internalCreateDataFrame(rowRdd, schema) } /** * Returns a Catalyst Schema for the given java bean class. 
*/ private def getSchema(beanClass: Class[_]): Seq[AttributeReference] = { val (dataType, _) = JavaTypeInference.inferDataType(beanClass) dataType.asInstanceOf[StructType].fields.map { f => AttributeReference(f.name, f.dataType, f.nullable)() } } /** * Execute a block of code with the this session set as the active session, and restore the * previous session on completion. */ private[sql] def withActive[T](block: => T): T = { // Use the active session thread local directly to make sure we get the session that is actually // set and not the default session. This to prevent that we promote the default session to the // active session once we are done. val old = SparkSession.activeThreadSession.get() SparkSession.setActiveSession(this) try block finally { SparkSession.setActiveSession(old) } } private[sql] def leafNodeDefaultParallelism: Int = { conf.get(SQLConf.LEAF_NODE_DEFAULT_PARALLELISM).getOrElse(sparkContext.defaultParallelism) }}@Stableobject SparkSession extends Logging { /** * Builder for [[SparkSession]]. */ @Stable class Builder extends Logging { private[this] val options = new scala.collection.mutable.HashMap[String, String] private[this] val extensions = new SparkSessionExtensions private[this] var userSuppliedContext: Option[SparkContext] = None private[spark] def sparkContext(sparkContext: SparkContext): Builder = synchronized { userSuppliedContext = Option(sparkContext) this } /** * Sets a name for the application, which will be shown in the Spark web UI. * If no application name is set, a randomly generated name will be used. * * @since 2.0.0 */ def appName(name: String): Builder = config("spark.app.name", name) /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: String): Builder = synchronized { options += key -> value this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Long): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Double): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 2.0.0 */ def config(key: String, value: Boolean): Builder = synchronized { options += key -> value.toString this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: Map[String, Any]): Builder = synchronized { map.foreach { kv: (String, Any) => { options += kv._1 -> kv._2.toString } } this } /** * Sets a config option. Options set using this method are automatically propagated to * both `SparkConf` and SparkSession's own configuration. * * @since 3.4.0 */ def config(map: java.util.Map[String, Any]): Builder = synchronized { config(map.asScala.toMap) } /** * Sets a list of config options based on the given `SparkConf`. 
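 *
 * A minimal sketch of seeding the builder from an existing `SparkConf` (the memory
 * setting below is only an illustration):
 * {{{
 *   val sparkConf = new SparkConf().set("spark.executor.memory", "2g")
 *   val spark = SparkSession.builder()
 *     .config(sparkConf)
 *     .getOrCreate()
 * }}}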
* * @since 2.0.0 */ def config(conf: SparkConf): Builder = synchronized { conf.getAll.foreach { case (k, v) => options += k -> v } this } /** * Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]" to * run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster. * * @since 2.0.0 */ def master(master: String): Builder = config("spark.master", master) /** * Enables Hive support, including connectivity to a persistent Hive metastore, support for * Hive serdes, and Hive user-defined functions. * * @since 2.0.0 */ def enableHiveSupport(): Builder = synchronized { if (hiveClassesArePresent) { config(CATALOG_IMPLEMENTATION.key, "hive") } else { throw new IllegalArgumentException( "Unable to instantiate SparkSession with Hive support because " + "Hive classes are not found.") } } /** * Inject extensions into the [[SparkSession]]. This allows a user to add Analyzer rules, * Optimizer rules, Planning Strategies or a customized parser. * * @since 2.2.0 */ def withExtensions(f: SparkSessionExtensions => Unit): Builder = synchronized { f(extensions) this } /** * Gets an existing [[SparkSession]] or, if there is no existing one, creates a new * one based on the options set in this builder. * * This method first checks whether there is a valid thread-local SparkSession, * and if yes, return that one. It then checks whether there is a valid global * default SparkSession, and if yes, return that one. If no valid global default * SparkSession exists, the method creates a new SparkSession and assigns the * newly created SparkSession as the global default. * * In case an existing SparkSession is returned, the non-static config options specified in * this builder will be applied to the existing SparkSession. * * @since 2.0.0 */ def getOrCreate(): SparkSession = synchronized { val sparkConf = new SparkConf() options.foreach { case (k, v) => sparkConf.set(k, v) } if (!sparkConf.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { assertOnDriver() } // Get the session from current thread's active session. var session = activeThreadSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // Global synchronization so we will only set the default session once. SparkSession.synchronized { // If the current thread does not have an active session, get it from the global session. session = defaultSession.get() if ((session ne null) && !session.sparkContext.isStopped) { applyModifiableSettings(session, new java.util.HashMap[String, String](options.asJava)) return session } // No active nor global default session. Create a new one. val sparkContext = userSuppliedContext.getOrElse { // set a random app name if not given. if (!sparkConf.contains("spark.app.name")) { sparkConf.setAppName(java.util.UUID.randomUUID().toString) } SparkContext.getOrCreate(sparkConf) // Do not update `SparkConf` for existing `SparkContext`, as it's shared by all sessions. } loadExtensions(extensions) applyExtensions(sparkContext, extensions) session = new SparkSession(sparkContext, None, None, extensions, options.toMap) setDefaultSession(session) setActiveSession(session) registerContextListener(sparkContext) } return session } } /** * Creates a [[SparkSession.Builder]] for constructing a [[SparkSession]]. 
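 *
 * When a session already exists, a later builder only applies its runtime (non-static)
 * options to it; a rough sketch of that behaviour:
 * {{{
 *   val first = SparkSession.builder().master("local[1]").getOrCreate()
 *   val second = SparkSession.builder()
 *     .config("spark.sql.shuffle.partitions", "4")  // runtime SQL conf, applied to `first`
 *     .getOrCreate()
 *   assert(first eq second)                         // the existing session is reused
 * }}}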
* * @since 2.0.0 */ def builder(): Builder = new Builder /** * Changes the SparkSession that will be returned in this thread and its children when * SparkSession.getOrCreate() is called. This can be used to ensure that a given thread receives * a SparkSession with an isolated session, instead of the global (first created) context. * * @since 2.0.0 */ def setActiveSession(session: SparkSession): Unit = { activeThreadSession.set(session) } /** * Clears the active SparkSession for current thread. Subsequent calls to getOrCreate will * return the first created context instead of a thread-local override. * * @since 2.0.0 */ def clearActiveSession(): Unit = { activeThreadSession.remove() } /** * Sets the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def setDefaultSession(session: SparkSession): Unit = { defaultSession.set(session) } /** * Clears the default SparkSession that is returned by the builder. * * @since 2.0.0 */ def clearDefaultSession(): Unit = { defaultSession.set(null) } /** * Returns the active SparkSession for the current thread, returned by the builder. * * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getActiveSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(activeThreadSession.get) } } /** * Returns the default SparkSession that is returned by the builder. * * @note Return None, when calling this function on executors * * @since 2.2.0 */ def getDefaultSession: Option[SparkSession] = { if (Utils.isInRunningSparkTask) { // Return None when running on executors. None } else { Option(defaultSession.get) } } /** * Returns the currently active SparkSession, otherwise the default one. If there is no default * SparkSession, throws an exception. * * @since 2.4.0 */ def active: SparkSession = { getActiveSession.getOrElse(getDefaultSession.getOrElse( throw new IllegalStateException("No active or default Spark session found"))) } /** * Apply modifiable settings to an existing [[SparkSession]]. This method are used * both in Scala and Python, so put this under [[SparkSession]] object. */ private[sql] def applyModifiableSettings( session: SparkSession, options: java.util.HashMap[String, String]): Unit = { // Lazy val to avoid an unnecessary session state initialization lazy val conf = session.sessionState.conf val dedupOptions = if (options.isEmpty) Map.empty[String, String] else ( options.asScala.toSet -- conf.getAllConfs.toSet).toMap val (staticConfs, otherConfs) = dedupOptions.partition(kv => SQLConf.isStaticConfigKey(kv._1)) otherConfs.foreach { case (k, v) => conf.setConfString(k, v) } // Note that other runtime SQL options, for example, for other third-party datasource // can be marked as an ignored configuration here. val maybeIgnoredConfs = otherConfs.filterNot { case (k, _) => conf.isModifiable(k) } if (staticConfs.nonEmpty || maybeIgnoredConfs.nonEmpty) { logWarning( "Using an existing Spark session; only runtime SQL configurations will take effect.") } if (staticConfs.nonEmpty) { logDebug("Ignored static SQL configurations:\n " + conf.redactOptions(staticConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } if (maybeIgnoredConfs.nonEmpty) { // Only print out non-static and non-runtime SQL configurations. // Note that this might show core configurations or source specific // options defined in the third-party datasource. 
logDebug("Configurations that might not take effect:\n " + conf.redactOptions( maybeIgnoredConfs).toSeq.map { case (k, v) => s"$k=$v" }.mkString("\n ")) } } /** * Returns a cloned SparkSession with all specified configurations disabled, or * the original SparkSession if all configurations are already disabled. */ private[sql] def getOrCloneSessionWithConfigsOff( session: SparkSession, configurations: Seq[ConfigEntry[Boolean]]): SparkSession = { val configsEnabled = configurations.filter(session.conf.get[Boolean]) if (configsEnabled.isEmpty) { session } else { val newSession = session.cloneSession() configsEnabled.foreach(conf => { newSession.conf.set(conf, false) }) newSession } } //////////////////////////////////////////////////////////////////////////////////////// // Private methods from now on //////////////////////////////////////////////////////////////////////////////////////// private val listenerRegistered: AtomicBoolean = new AtomicBoolean(false) /** Register the AppEnd listener onto the Context */ private def registerContextListener(sparkContext: SparkContext): Unit = { if (!listenerRegistered.get()) { sparkContext.addSparkListener(new SparkListener { override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = { defaultSession.set(null) listenerRegistered.set(false) } }) listenerRegistered.set(true) } } /** The active SparkSession for the current thread. */ private val activeThreadSession = new InheritableThreadLocal[SparkSession] /** Reference to the root SparkSession. */ private val defaultSession = new AtomicReference[SparkSession] private val HIVE_SESSION_STATE_BUILDER_CLASS_NAME = "org.apache.spark.sql.hive.HiveSessionStateBuilder" private def sessionStateClassName(conf: SparkConf): String = { conf.get(CATALOG_IMPLEMENTATION) match { case "hive" => HIVE_SESSION_STATE_BUILDER_CLASS_NAME case "in-memory" => classOf[SessionStateBuilder].getCanonicalName } } private def assertOnDriver(): Unit = { if (TaskContext.get != null) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkSession should only be created and accessed on the driver.") } } /** * Helper method to create an instance of `SessionState` based on `className` from conf. * The result is either `SessionState` or a Hive based `SessionState`. */ private def instantiateSessionState( className: String, sparkSession: SparkSession): SessionState = { try { // invoke new [Hive]SessionStateBuilder( // SparkSession, // Option[SessionState]) val clazz = Utils.classForName(className) val ctor = clazz.getConstructors.head ctor.newInstance(sparkSession, None).asInstanceOf[BaseSessionStateBuilder].build() } catch { case NonFatal(e) => throw new IllegalArgumentException(s"Error while instantiating '$className':", e) } } /** * @return true if Hive classes can be loaded, otherwise false. */ private[spark] def hiveClassesArePresent: Boolean = { try { Utils.classForName(HIVE_SESSION_STATE_BUILDER_CLASS_NAME) Utils.classForName("org.apache.hadoop.hive.conf.HiveConf") true } catch { case _: ClassNotFoundException | _: NoClassDefFoundError => false } } private[spark] def cleanupAnyExistingSession(): Unit = { val session = getActiveSession.orElse(getDefaultSession) if (session.isDefined) { logWarning( s"""An existing Spark session exists as the active or default session. |This probably means another suite leaked it. Attempting to stop it before continuing. 
|This existing Spark session was created at: | |${session.get.creationSite.longForm} | """.stripMargin) session.get.stop() SparkSession.clearActiveSession() SparkSession.clearDefaultSession() } } /** * Initialize extensions specified in [[StaticSQLConf]]. The classes will be applied to the * extensions passed into this function. */ private def applyExtensions( sparkContext: SparkContext, extensions: SparkSessionExtensions): SparkSessionExtensions = { val extensionConfClassNames = sparkContext.getConf.get(StaticSQLConf.SPARK_SESSION_EXTENSIONS) .getOrElse(Seq.empty) extensionConfClassNames.foreach { extensionConfClassName => try { val extensionConfClass = Utils.classForName(extensionConfClassName) val extensionConf = extensionConfClass.getConstructor().newInstance() .asInstanceOf[SparkSessionExtensions => Unit] extensionConf(extensions) } catch { // Ignore the error if we cannot find the class or when the class has the wrong type. case e@(_: ClassCastException | _: ClassNotFoundException | _: NoClassDefFoundError) => logWarning(s"Cannot use $extensionConfClassName to configure session extensions.", e) } } extensions } /** * Load extensions from [[ServiceLoader]] and use them */ private def loadExtensions(extensions: SparkSessionExtensions): Unit = { val loader = ServiceLoader.load(classOf[SparkSessionExtensionsProvider], Utils.getContextOrSparkClassLoader) val loadedExts = loader.iterator() while (loadedExts.hasNext) { try { val ext = loadedExts.next() ext(extensions) } catch { case e: Throwable => logWarning("Failed to load session extension", e) } } }} </s>
<s>[INST] <<SYS>> Below is an instruction that describes a task. Write a response that appropriately completes the request. <</SYS>> Formulate an equation to calculate the height of a triangle given the angle, side lengths and opposite side length. [/INST] /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */package org.apache.spark.sqlimport java.util.Propertiesimport scala.collection.immutableimport scala.reflect.runtime.universe.TypeTagimport org.apache.spark.{SparkConf, SparkContext}import org.apache.spark.annotation.{DeveloperApi, Experimental, Stable, Unstable}import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config.ConfigEntryimport org.apache.spark.rdd.RDDimport org.apache.spark.sql.catalyst._import org.apache.spark.sql.catalyst.analysis.UnresolvedNamespaceimport org.apache.spark.sql.catalyst.expressions._import org.apache.spark.sql.catalyst.plans.logical.ShowTablesimport org.apache.spark.sql.internal.{SessionState, SharedState, SQLConf}import org.apache.spark.sql.sources.BaseRelationimport org.apache.spark.sql.streaming.{DataStreamReader, StreamingQueryManager}import org.apache.spark.sql.types._import org.apache.spark.sql.util.ExecutionListenerManager/** * The entry point for working with structured data (rows and columns) in Spark 1.x. * * As of Spark 2.0, this is replaced by [[SparkSession]]. However, we are keeping the class * here for backward compatibility. * * @groupname basic Basic Operations * @groupname ddl_ops Persistent Catalog DDL * @groupname cachemgmt Cached Table Management * @groupname genericdata Generic Data Sources * @groupname specificdata Specific Data Sources * @groupname config Configuration * @groupname dataframes Custom DataFrame Creation * @groupname dataset Custom Dataset Creation * @groupname Ungrouped Support functions for language integrated queries * @since 1.0.0 */@Stableclass SQLContext private[sql](val sparkSession: SparkSession) extends Logging with Serializable { self => sparkSession.sparkContext.assertNotStopped() // Note: Since Spark 2.0 this class has become a wrapper of SparkSession, where the // real functionality resides. This class remains mainly for backward compatibility. 
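  // For example, the usual way to obtain this wrapper is through an existing SparkSession
  // (a usage sketch):
  //   val spark = SparkSession.builder().getOrCreate()
  //   val sqlContext = spark.sqlContext   // shares spark's SessionState and SparkContext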
@deprecated("Use SparkSession.builder instead", "2.0.0") def this(sc: SparkContext) = { this(SparkSession.builder().sparkContext(sc).getOrCreate()) } @deprecated("Use SparkSession.builder instead", "2.0.0") def this(sparkContext: JavaSparkContext) = this(sparkContext.sc) // TODO: move this logic into SparkSession private[sql] def sessionState: SessionState = sparkSession.sessionState private[sql] def sharedState: SharedState = sparkSession.sharedState private[sql] def conf: SQLConf = sessionState.conf def sparkContext: SparkContext = sparkSession.sparkContext /** * Returns a [[SQLContext]] as new session, with separated SQL configurations, temporary * tables, registered functions, but sharing the same `SparkContext`, cached data and * other things. * * @since 1.6.0 */ def newSession(): SQLContext = sparkSession.newSession().sqlContext /** * An interface to register custom [[org.apache.spark.sql.util.QueryExecutionListener]]s * that listen for execution metrics. */ def listenerManager: ExecutionListenerManager = sparkSession.listenerManager /** * Set Spark SQL configuration properties. * * @group config * @since 1.0.0 */ def setConf(props: Properties): Unit = { sessionState.conf.setConf(props) } /** * Set the given Spark SQL configuration property. */ private[sql] def setConf[T](entry: ConfigEntry[T], value: T): Unit = { sessionState.conf.setConf(entry, value) } /** * Set the given Spark SQL configuration property. * * @group config * @since 1.0.0 */ def setConf(key: String, value: String): Unit = { sparkSession.conf.set(key, value) } /** * Return the value of Spark SQL configuration property for the given key. * * @group config * @since 1.0.0 */ def getConf(key: String): String = { sparkSession.conf.get(key) } /** * Return the value of Spark SQL configuration property for the given key. If the key is not set * yet, return `defaultValue`. * * @group config * @since 1.0.0 */ def getConf(key: String, defaultValue: String): String = { sparkSession.conf.get(key, defaultValue) } /** * Return all the configuration properties that have been set (i.e. not the default). * This creates a new copy of the config properties in the form of a Map. * * @group config * @since 1.0.0 */ def getAllConfs: immutable.Map[String, String] = { sparkSession.conf.getAll } /** * :: Experimental :: * A collection of methods that are considered experimental, but can be used to hook into * the query planner for advanced functionality. * * @group basic * @since 1.3.0 */ @Experimental @transient @Unstable def experimental: ExperimentalMethods = sparkSession.experimental /** * Returns a `DataFrame` with no rows or columns. * * @group basic * @since 1.3.0 */ def emptyDataFrame: DataFrame = sparkSession.emptyDataFrame /** * A collection of methods for registering user-defined functions (UDF). * * The following example registers a Scala closure as UDF: * {{{ * sqlContext.udf.register("myUDF", (arg1: Int, arg2: String) => arg2 + arg1) * }}} * * The following example registers a UDF in Java: * {{{ * sqlContext.udf().register("myUDF", * (Integer arg1, String arg2) -> arg2 + arg1, * DataTypes.StringType); * }}} * * @note The user-defined functions must be deterministic. Due to optimization, * duplicate invocations may be eliminated or the function may even be invoked more times than * it is present in the query. * * @group basic * @since 1.3.0 */ def udf: UDFRegistration = sparkSession.udf /** * Returns true if the table is currently cached in-memory. 
* @group cachemgmt * @since 1.3.0 */ def isCached(tableName: String): Boolean = { sparkSession.catalog.isCached(tableName) } /** * Caches the specified table in-memory. * @group cachemgmt * @since 1.3.0 */ def cacheTable(tableName: String): Unit = { sparkSession.catalog.cacheTable(tableName) } /** * Removes the specified table from the in-memory cache. * @group cachemgmt * @since 1.3.0 */ def uncacheTable(tableName: String): Unit = { sparkSession.catalog.uncacheTable(tableName) } /** * Removes all cached tables from the in-memory cache. * @since 1.3.0 */ def clearCache(): Unit = { sparkSession.catalog.clearCache() } // scalastyle:off // Disable style checker so "implicits" object can start with lowercase i /** * (Scala-specific) Implicit methods available in Scala for converting * common Scala objects into `DataFrame`s. * * {{{ * val sqlContext = new SQLContext(sc) * import sqlContext.implicits._ * }}} * * @group basic * @since 1.3.0 */ object implicits extends SQLImplicits with Serializable { protected override def _sqlContext: SQLContext = self } // scalastyle:on /** * Creates a DataFrame from an RDD of Product (e.g. case classes, tuples). * * @group dataframes * @since 1.3.0 */ def createDataFrame[A <: Product : TypeTag](rdd: RDD[A]): DataFrame = { sparkSession.createDataFrame(rdd) } /** * Creates a DataFrame from a local Seq of Product. * * @group dataframes * @since 1.3.0 */ def createDataFrame[A <: Product : TypeTag](data: Seq[A]): DataFrame = { sparkSession.createDataFrame(data) } /** * Convert a `BaseRelation` created for external data sources into a `DataFrame`. * * @group dataframes * @since 1.3.0 */ def baseRelationToDataFrame(baseRelation: BaseRelation): DataFrame = { sparkSession.baseRelationToDataFrame(baseRelation) } /** * :: DeveloperApi :: * Creates a `DataFrame` from an `RDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * Example: * {{{ * import org.apache.spark.sql._ * import org.apache.spark.sql.types._ * val sqlContext = new org.apache.spark.sql.SQLContext(sc) * * val schema = * StructType( * StructField("name", StringType, false) :: * StructField("age", IntegerType, true) :: Nil) * * val people = * sc.textFile("examples/src/main/resources/people.txt").map( * _.split(",")).map(p => Row(p(0), p(1).trim.toInt)) * val dataFrame = sqlContext.createDataFrame(people, schema) * dataFrame.printSchema * // root * // |-- name: string (nullable = false) * // |-- age: integer (nullable = true) * * dataFrame.createOrReplaceTempView("people") * sqlContext.sql("select name from people").collect.foreach(println) * }}} * * @group dataframes * @since 1.3.0 */ @DeveloperApi def createDataFrame(rowRDD: RDD[Row], schema: StructType): DataFrame = { sparkSession.createDataFrame(rowRDD, schema) } /** * Creates a [[Dataset]] from a local Seq of data of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. 
* * == Example == * * {{{ * * import spark.implicits._ * case class Person(name: String, age: Long) * val data = Seq(Person("Michael", 29), Person("Andy", 30), Person("Justin", 19)) * val ds = spark.createDataset(data) * * ds.show() * // +-------+---+ * // | name|age| * // +-------+---+ * // |Michael| 29| * // | Andy| 30| * // | Justin| 19| * // +-------+---+ * }}} * * @since 2.0.0 * @group dataset */ def createDataset[T : Encoder](data: Seq[T]): Dataset[T] = { sparkSession.createDataset(data) } /** * Creates a [[Dataset]] from an RDD of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * @since 2.0.0 * @group dataset */ def createDataset[T : Encoder](data: RDD[T]): Dataset[T] = { sparkSession.createDataset(data) } /** * Creates a [[Dataset]] from a `java.util.List` of a given type. This method requires an * encoder (to convert a JVM object of type `T` to and from the internal Spark SQL representation) * that is generally created automatically through implicits from a `SparkSession`, or can be * created explicitly by calling static methods on [[Encoders]]. * * == Java Example == * * {{{ * List<String> data = Arrays.asList("hello", "world"); * Dataset<String> ds = spark.createDataset(data, Encoders.STRING()); * }}} * * @since 2.0.0 * @group dataset */ def createDataset[T : Encoder](data: java.util.List[T]): Dataset[T] = { sparkSession.createDataset(data) } /** * Creates a DataFrame from an RDD[Row]. User can specify whether the input rows should be * converted to Catalyst rows. */ private[sql] def internalCreateDataFrame( catalystRows: RDD[InternalRow], schema: StructType, isStreaming: Boolean = false) = { sparkSession.internalCreateDataFrame(catalystRows, schema, isStreaming) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `JavaRDD` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided RDD matches * the provided schema. Otherwise, there will be runtime exception. * * @group dataframes * @since 1.3.0 */ @DeveloperApi def createDataFrame(rowRDD: JavaRDD[Row], schema: StructType): DataFrame = { sparkSession.createDataFrame(rowRDD, schema) } /** * :: DeveloperApi :: * Creates a `DataFrame` from a `java.util.List` containing [[Row]]s using the given schema. * It is important to make sure that the structure of every [[Row]] of the provided List matches * the provided schema. Otherwise, there will be runtime exception. * * @group dataframes * @since 1.6.0 */ @DeveloperApi def createDataFrame(rows: java.util.List[Row], schema: StructType): DataFrame = { sparkSession.createDataFrame(rows, schema) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * @group dataframes * @since 1.3.0 */ def createDataFrame(rdd: RDD[_], beanClass: Class[_]): DataFrame = { sparkSession.createDataFrame(rdd, beanClass) } /** * Applies a schema to an RDD of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. 
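 *
 * A usage sketch; `Person` below is an illustrative bean defined with `@BeanProperty`,
 * not a class provided by Spark:
 * {{{
 *   import scala.beans.BeanProperty
 *   class Person(@BeanProperty var name: String, @BeanProperty var age: Int)
 *     extends java.io.Serializable
 *
 *   val people = sc.parallelize(Seq(new Person("Alice", 29))).toJavaRDD()
 *   val df = sqlContext.createDataFrame(people, classOf[Person])
 * }}}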
* @group dataframes * @since 1.3.0 */ def createDataFrame(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame = { sparkSession.createDataFrame(rdd, beanClass) } /** * Applies a schema to a List of Java Beans. * * WARNING: Since there is no guaranteed ordering for fields in a Java Bean, * SELECT * queries will return the columns in an undefined order. * @group dataframes * @since 1.6.0 */ def createDataFrame(data: java.util.List[_], beanClass: Class[_]): DataFrame = { sparkSession.createDataFrame(data, beanClass) } /** * Returns a [[DataFrameReader]] that can be used to read non-streaming data in as a * `DataFrame`. * {{{ * sqlContext.read.parquet("/path/to/file.parquet") * sqlContext.read.schema(schema).json("/path/to/file.json") * }}} * * @group genericdata * @since 1.4.0 */ def read: DataFrameReader = sparkSession.read /** * Returns a `DataStreamReader` that can be used to read streaming data in as a `DataFrame`. * {{{ * sparkSession.readStream.parquet("/path/to/directory/of/parquet/files") * sparkSession.readStream.schema(schema).json("/path/to/directory/of/json/files") * }}} * * @since 2.0.0 */ def readStream: DataStreamReader = sparkSession.readStream /** * Creates an external table from the given path and returns the corresponding DataFrame. * It will use the default data source configured by spark.sql.sources.default. * * @group ddl_ops * @since 1.3.0 */ @deprecated("use sparkSession.catalog.createTable instead.", "2.2.0") def createExternalTable(tableName: String, path: String): DataFrame = { sparkSession.catalog.createTable(tableName, path) } /** * Creates an external table from the given path based on a data source * and returns the corresponding DataFrame. * * @group ddl_ops * @since 1.3.0 */ @deprecated("use sparkSession.catalog.createTable instead.", "2.2.0") def createExternalTable( tableName: String, path: String, source: String): DataFrame = { sparkSession.catalog.createTable(tableName, path, source) } /** * Creates an external table from the given path based on a data source and a set of options. * Then, returns the corresponding DataFrame. * * @group ddl_ops * @since 1.3.0 */ @deprecated("use sparkSession.catalog.createTable instead.", "2.2.0") def createExternalTable( tableName: String, source: String, options: java.util.Map[String, String]): DataFrame = { sparkSession.catalog.createTable(tableName, source, options) } /** * (Scala-specific) * Creates an external table from the given path based on a data source and a set of options. * Then, returns the corresponding DataFrame. * * @group ddl_ops * @since 1.3.0 */ @deprecated("use sparkSession.catalog.createTable instead.", "2.2.0") def createExternalTable( tableName: String, source: String, options: Map[String, String]): DataFrame = { sparkSession.catalog.createTable(tableName, source, options) } /** * Create an external table from the given path based on a data source, a schema and * a set of options. Then, returns the corresponding DataFrame. * * @group ddl_ops * @since 1.3.0 */ @deprecated("use sparkSession.catalog.createTable instead.", "2.2.0") def createExternalTable( tableName: String, source: String, schema: StructType, options: java.util.Map[String, String]): DataFrame = { sparkSession.catalog.createTable(tableName, source, schema, options) } /** * (Scala-specific) * Create an external table from the given path based on a data source, a schema and * a set of options. Then, returns the corresponding DataFrame. 
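 *
 * A usage sketch (the table name, path, and schema below are illustrative):
 * {{{
 *   import org.apache.spark.sql.types._
 *   val schema = StructType(Seq(
 *     StructField("id", LongType),
 *     StructField("name", StringType)))
 *   sqlContext.createExternalTable(
 *     "people_ext", "parquet", schema, Map("path" -> "/data/people"))
 * }}}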
* * @group ddl_ops * @since 1.3.0 */ @deprecated("use sparkSession.catalog.createTable instead.", "2.2.0") def createExternalTable( tableName: String, source: String, schema: StructType, options: Map[String, String]): DataFrame = { sparkSession.catalog.createTable(tableName, source, schema, options) } /** * Registers the given `DataFrame` as a temporary table in the catalog. Temporary tables exist * only during the lifetime of this instance of SQLContext. */ private[sql] def registerDataFrameAsTable(df: DataFrame, tableName: String): Unit = { df.createOrReplaceTempView(tableName) } /** * Drops the temporary table with the given table name in the catalog. If the table has been * cached/persisted before, it's also unpersisted. * * @param tableName the name of the table to be unregistered. * @group basic * @since 1.3.0 */ def dropTempTable(tableName: String): Unit = { sparkSession.catalog.dropTempView(tableName) } /** * Creates a `DataFrame` with a single `LongType` column named `id`, containing elements * in a range from 0 to `end` (exclusive) with step value 1. * * @since 1.4.1 * @group dataframe */ def range(end: Long): DataFrame = sparkSession.range(end).toDF() /** * Creates a `DataFrame` with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with step value 1. * * @since 1.4.0 * @group dataframe */ def range(start: Long, end: Long): DataFrame = sparkSession.range(start, end).toDF() /** * Creates a `DataFrame` with a single `LongType` column named `id`, containing elements * in a range from `start` to `end` (exclusive) with a step value. * * @since 2.0.0 * @group dataframe */ def range(start: Long, end: Long, step: Long): DataFrame = { sparkSession.range(start, end, step).toDF() } /** * Creates a `DataFrame` with a single `LongType` column named `id`, containing elements * in an range from `start` to `end` (exclusive) with an step value, with partition number * specified. * * @since 1.4.0 * @group dataframe */ def range(start: Long, end: Long, step: Long, numPartitions: Int): DataFrame = { sparkSession.range(start, end, step, numPartitions).toDF() } /** * Executes a SQL query using Spark, returning the result as a `DataFrame`. * This API eagerly runs DDL/DML commands, but not for SELECT queries. * * @group basic * @since 1.3.0 */ def sql(sqlText: String): DataFrame = sparkSession.sql(sqlText) /** * Returns the specified table as a `DataFrame`. * * @group ddl_ops * @since 1.3.0 */ def table(tableName: String): DataFrame = { sparkSession.table(tableName) } /** * Returns a `DataFrame` containing names of existing tables in the current database. * The returned DataFrame has three columns, database, tableName and isTemporary (a Boolean * indicating if a table is a temporary one or not). * * @group ddl_ops * @since 1.3.0 */ def tables(): DataFrame = { Dataset.ofRows(sparkSession, ShowTables(UnresolvedNamespace(Nil), None)) } /** * Returns a `DataFrame` containing names of existing tables in the given database. * The returned DataFrame has three columns, database, tableName and isTemporary (a Boolean * indicating if a table is a temporary one or not). * * @group ddl_ops * @since 1.3.0 */ def tables(databaseName: String): DataFrame = { Dataset.ofRows(sparkSession, ShowTables(UnresolvedNamespace(Seq(databaseName)), None)) } /** * Returns a `StreamingQueryManager` that allows managing all the * [[org.apache.spark.sql.streaming.StreamingQuery StreamingQueries]] active on `this` context. 
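 *
 * For example (a sketch):
 * {{{
 *   sqlContext.streams.active.foreach(q => println(q.name))
 * }}}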
* * @since 2.0.0 */ def streams: StreamingQueryManager = sparkSession.streams /** * Returns the names of tables in the current database as an array. * * @group ddl_ops * @since 1.3.0 */ def tableNames(): Array[String] = { tableNames(sparkSession.catalog.currentDatabase) } /** * Returns the names of tables in the given database as an array. * * @group ddl_ops * @since 1.3.0 */ def tableNames(databaseName: String): Array[String] = { sessionState.catalog.listTables(databaseName).map(_.table).toArray } //////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////// // Deprecated methods //////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////// /** * @deprecated As of 1.3.0, replaced by `createDataFrame()`. */ @deprecated("Use createDataFrame instead.", "1.3.0") def applySchema(rowRDD: RDD[Row], schema: StructType): DataFrame = { createDataFrame(rowRDD, schema) } /** * @deprecated As of 1.3.0, replaced by `createDataFrame()`. */ @deprecated("Use createDataFrame instead.", "1.3.0") def applySchema(rowRDD: JavaRDD[Row], schema: StructType): DataFrame = { createDataFrame(rowRDD, schema) } /** * @deprecated As of 1.3.0, replaced by `createDataFrame()`. */ @deprecated("Use createDataFrame instead.", "1.3.0") def applySchema(rdd: RDD[_], beanClass: Class[_]): DataFrame = { createDataFrame(rdd, beanClass) } /** * @deprecated As of 1.3.0, replaced by `createDataFrame()`. */ @deprecated("Use createDataFrame instead.", "1.3.0") def applySchema(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame = { createDataFrame(rdd, beanClass) } /** * Loads a Parquet file, returning the result as a `DataFrame`. This function returns an empty * `DataFrame` if no paths are passed in. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().parquet()`. */ @deprecated("Use read.parquet() instead.", "1.4.0") @scala.annotation.varargs def parquetFile(paths: String*): DataFrame = { if (paths.isEmpty) { emptyDataFrame } else { read.parquet(paths : _*) } } /** * Loads a JSON file (one object per line), returning the result as a `DataFrame`. * It goes through the entire dataset once to determine the schema. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json() instead.", "1.4.0") def jsonFile(path: String): DataFrame = { read.json(path) } /** * Loads a JSON file (one object per line) and applies the given schema, * returning the result as a `DataFrame`. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json() instead.", "1.4.0") def jsonFile(path: String, schema: StructType): DataFrame = { read.schema(schema).json(path) } /** * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json() instead.", "1.4.0") def jsonFile(path: String, samplingRatio: Double): DataFrame = { read.option("samplingRatio", samplingRatio.toString).json(path) } /** * Loads an RDD[String] storing JSON objects (one object per record), returning the result as a * `DataFrame`. * It goes through the entire dataset once to determine the schema. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. 
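 * A migration sketch from this deprecated method to the `read` API (`jsonStrings` is an
 * illustrative `RDD[String]`):
 * {{{
 *   // before (deprecated): sqlContext.jsonRDD(jsonStrings)
 *   // after:
 *   val df = sqlContext.read.json(jsonStrings)
 * }}}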
*/ @deprecated("Use read.json() instead.", "1.4.0") def jsonRDD(json: RDD[String]): DataFrame = read.json(json) /** * Loads an RDD[String] storing JSON objects (one object per record), returning the result as a * `DataFrame`. * It goes through the entire dataset once to determine the schema. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json() instead.", "1.4.0") def jsonRDD(json: JavaRDD[String]): DataFrame = read.json(json) /** * Loads an RDD[String] storing JSON objects (one object per record) and applies the given schema, * returning the result as a `DataFrame`. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json() instead.", "1.4.0") def jsonRDD(json: RDD[String], schema: StructType): DataFrame = { read.schema(schema).json(json) } /** * Loads an JavaRDD[String] storing JSON objects (one object per record) and applies the given * schema, returning the result as a `DataFrame`. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json() instead.", "1.4.0") def jsonRDD(json: JavaRDD[String], schema: StructType): DataFrame = { read.schema(schema).json(json) } /** * Loads an RDD[String] storing JSON objects (one object per record) inferring the * schema, returning the result as a `DataFrame`. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json() instead.", "1.4.0") def jsonRDD(json: RDD[String], samplingRatio: Double): DataFrame = { read.option("samplingRatio", samplingRatio.toString).json(json) } /** * Loads a JavaRDD[String] storing JSON objects (one object per record) inferring the * schema, returning the result as a `DataFrame`. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().json()`. */ @deprecated("Use read.json() instead.", "1.4.0") def jsonRDD(json: JavaRDD[String], samplingRatio: Double): DataFrame = { read.option("samplingRatio", samplingRatio.toString).json(json) } /** * Returns the dataset stored at path as a DataFrame, * using the default data source configured by spark.sql.sources.default. * * @group genericdata * @deprecated As of 1.4.0, replaced by `read().load(path)`. */ @deprecated("Use read.load(path) instead.", "1.4.0") def load(path: String): DataFrame = { read.load(path) } /** * Returns the dataset stored at path as a DataFrame, using the given data source. * * @group genericdata * @deprecated As of 1.4.0, replaced by `read().format(source).load(path)`. */ @deprecated("Use read.format(source).load(path) instead.", "1.4.0") def load(path: String, source: String): DataFrame = { read.format(source).load(path) } /** * (Java-specific) Returns the dataset specified by the given data source and * a set of options as a DataFrame. * * @group genericdata * @deprecated As of 1.4.0, replaced by `read().format(source).options(options).load()`. */ @deprecated("Use read.format(source).options(options).load() instead.", "1.4.0") def load(source: String, options: java.util.Map[String, String]): DataFrame = { read.options(options).format(source).load() } /** * (Scala-specific) Returns the dataset specified by the given data source and * a set of options as a DataFrame. * * @group genericdata * @deprecated As of 1.4.0, replaced by `read().format(source).options(options).load()`. 
*/ @deprecated("Use read.format(source).options(options).load() instead.", "1.4.0") def load(source: String, options: Map[String, String]): DataFrame = { read.options(options).format(source).load() } /** * (Java-specific) Returns the dataset specified by the given data source and * a set of options as a DataFrame, using the given schema as the schema of the DataFrame. * * @group genericdata * @deprecated As of 1.4.0, replaced by * `read().format(source).schema(schema).options(options).load()`. */ @deprecated("Use read.format(source).schema(schema).options(options).load() instead.", "1.4.0") def load( source: String, schema: StructType, options: java.util.Map[String, String]): DataFrame = { read.format(source).schema(schema).options(options).load() } /** * (Scala-specific) Returns the dataset specified by the given data source and * a set of options as a DataFrame, using the given schema as the schema of the DataFrame. * * @group genericdata * @deprecated As of 1.4.0, replaced by * `read().format(source).schema(schema).options(options).load()`. */ @deprecated("Use read.format(source).schema(schema).options(options).load() instead.", "1.4.0") def load(source: String, schema: StructType, options: Map[String, String]): DataFrame = { read.format(source).schema(schema).options(options).load() } /** * Construct a `DataFrame` representing the database table accessible via JDBC URL * url named table. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().jdbc()`. */ @deprecated("Use read.jdbc() instead.", "1.4.0") def jdbc(url: String, table: String): DataFrame = { read.jdbc(url, table, new Properties) } /** * Construct a `DataFrame` representing the database table accessible via JDBC URL * url named table. Partitions of the table will be retrieved in parallel based on the parameters * passed to this function. * * @param columnName the name of a column of integral type that will be used for partitioning. * @param lowerBound the minimum value of `columnName` used to decide partition stride * @param upperBound the maximum value of `columnName` used to decide partition stride * @param numPartitions the number of partitions. the range `minValue`-`maxValue` will be split * evenly into this many partitions * @group specificdata * @deprecated As of 1.4.0, replaced by `read().jdbc()`. */ @deprecated("Use read.jdbc() instead.", "1.4.0") def jdbc( url: String, table: String, columnName: String, lowerBound: Long, upperBound: Long, numPartitions: Int): DataFrame = { read.jdbc(url, table, columnName, lowerBound, upperBound, numPartitions, new Properties) } /** * Construct a `DataFrame` representing the database table accessible via JDBC URL * url named table. The theParts parameter gives a list expressions * suitable for inclusion in WHERE clauses; each one defines one partition * of the `DataFrame`. * * @group specificdata * @deprecated As of 1.4.0, replaced by `read().jdbc()`. */ @deprecated("Use read.jdbc() instead.", "1.4.0") def jdbc(url: String, table: String, theParts: Array[String]): DataFrame = { read.jdbc(url, table, theParts, new Properties) }}/** * This SQLContext object contains utility functions to create a singleton SQLContext instance, * or to get the created SQLContext instance. * * It also provides utility functions to support preference for threads in multiple sessions * scenario, setActive could set a SQLContext for current thread, which will be returned by * getOrCreate instead of the global one. 
*/object SQLContext { /** * Get the singleton SQLContext if it exists or create a new one using the given SparkContext. * * This function can be used to create a singleton SQLContext object that can be shared across * the JVM. * * If there is an active SQLContext for current thread, it will be returned instead of the global * one. * * @since 1.5.0 */ @deprecated("Use SparkSession.builder instead", "2.0.0") def getOrCreate(sparkContext: SparkContext): SQLContext = { SparkSession.builder().sparkContext(sparkContext).getOrCreate().sqlContext } /** * Changes the SQLContext that will be returned in this thread and its children when * SQLContext.getOrCreate() is called. This can be used to ensure that a given thread receives * a SQLContext with an isolated session, instead of the global (first created) context. * * @since 1.6.0 */ @deprecated("Use SparkSession.setActiveSession instead", "2.0.0") def setActive(sqlContext: SQLContext): Unit = { SparkSession.setActiveSession(sqlContext.sparkSession) } /** * Clears the active SQLContext for current thread. Subsequent calls to getOrCreate will * return the first created context instead of a thread-local override. * * @since 1.6.0 */ @deprecated("Use SparkSession.clearActiveSession instead", "2.0.0") def clearActive(): Unit = { SparkSession.clearActiveSession() } /** * Converts an iterator of Java Beans to InternalRow using the provided * bean info & schema. This is not related to the singleton, but is a static * method for internal use. */ private[sql] def beansToRows( data: Iterator[_], beanClass: Class[_], attrs: Seq[AttributeReference]): Iterator[InternalRow] = { def createStructConverter(cls: Class[_], fieldTypes: Seq[DataType]): Any => InternalRow = { val methodConverters = JavaTypeInference.getJavaBeanReadableProperties(cls).zip(fieldTypes) .map { case (property, fieldType) => val method = property.getReadMethod method -> createConverter(method.getReturnType, fieldType) } value => if (value == null) { null } else { new GenericInternalRow( methodConverters.map { case (method, converter) => converter(method.invoke(value)) }) } } def createConverter(cls: Class[_], dataType: DataType): Any => Any = dataType match { case struct: StructType => createStructConverter(cls, struct.map(_.dataType)) case _ => CatalystTypeConverters.createToCatalystConverter(dataType) } val dataConverter = createStructConverter(beanClass, attrs.map(_.dataType)) data.map(dataConverter) } /** * Extract `spark.sql.*` properties from the conf and return them as a [[Properties]]. */ private[sql] def getSQLProperties(sparkConf: SparkConf): Properties = { val properties = new Properties sparkConf.getAll.foreach { case (key, value) => if (key.startsWith("spark.sql")) { properties.setProperty(key, value) } } properties }} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. 
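 *
 * A minimal local-mode sketch:
 * {{{
 *   val conf = new SparkConf().setMaster("local[2]").setAppName("SparkContextExample")
 *   val sc = new SparkContext(conf)
 *   try {
 *     println(sc.parallelize(1 to 100).sum())
 *   } finally {
 *     sc.stop()    // only one SparkContext may be active per JVM
 *   }
 * }}}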
*/class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
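 * For example (the configuration key below is illustrative):
 * {{{
 *   val copied = sc.getConf
 *   copied.set("spark.myapp.flag", "true")  // modifies only the copy, not the running context
 * }}}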
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
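 *
 * An illustrative sketch (not part of the original Scaladoc; the tag name is arbitrary and
 * `cancelJobsWithTag` is assumed to be available in this Spark version):
 * {{{
 *   sc.addJobTag("nightly-etl")
 *   sc.parallelize(1 to 1000).count()           // this job carries the "nightly-etl" tag
 *   // from another thread: sc.cancelJobsWithTag("nightly-etl")
 * }}}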
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. 
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
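 *
 * A minimal sketch of the copy-before-cache idiom described in the note above (illustrative, not
 * part of the original Scaladoc; `path` is a placeholder and the Hadoop `TextInputFormat`,
 * `LongWritable` and `Text` classes used elsewhere in this file are assumed to be imported):
 * {{{
 *   val raw = sc.hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
 *   val safeToCache = raw.map { case (k, v) => (k.get(), v.toString) }.cache()
 * }}}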
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newApiHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that user * don't need to pass them directly. Instead, callers can just write, for example: * ``` * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * ``` * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast. 
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
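 *
 * For example (an illustrative sketch, not part of the original Scaladoc):
 * {{{
 *   val a = sc.parallelize(1 to 3)
 *   val b = sc.parallelize(4 to 6)
 *   sc.union(Seq(a, b)).collect()   // Array(1, 2, 3, 4, 5, 6)
 * }}}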
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
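 *
 * The public `broadcast` wrapper above is typically used as follows (an illustrative sketch,
 * not part of the original Scaladoc):
 * {{{
 *   val lookup = sc.broadcast(Map(1 -> "a", 2 -> "b"))
 *   sc.parallelize(1 to 3).map(i => lookup.value.getOrElse(i, "?")).collect()
 * }}}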
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
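 *
 * Example (an illustrative sketch, not part of the original Scaladoc; the path is a placeholder):
 * {{{
 *   sc.addFile("hdfs://namenode:8020/config/lookup.json")
 *   // On executors, resolve the local copy with:
 *   //   org.apache.spark.SparkFiles.get("lookup.json")
 * }}}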
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
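 *
 * For example (an illustrative sketch, not part of the original Scaladoc; assumes
 * `org.apache.spark.scheduler.SparkListener` and `SparkListenerJobEnd` are imported):
 * {{{
 *   sc.addSparkListener(new SparkListener {
 *     override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
 *       println(s"Job ${jobEnd.jobId} finished with result ${jobEnd.jobResult}")
 *   })
 * }}}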
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be launched concurrently based on the given * ResourceProfile, counting all task slots that could be used even if some of them are * in use at the moment. * Note: don't cache the value returned by this method, because the number can change * as executors are added or removed. * * @param rp The ResourceProfile to use when calculating the max number of concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * `SparkContext#requestExecutors`. * * @return whether the request is received. 
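 *
 * For example (a sketch; the executor IDs are illustrative and would normally be taken
 * from the Spark UI or from listener events):
 * {{{
 *   sc.killExecutors(Seq("1", "2"))
 * }}}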
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
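 *
 * A minimal sketch:
 * {{{
 *   val nums = sc.parallelize(1 to 100).cache()
 *   nums.count()                        // materializes the cached partitions
 *   sc.getPersistentRDDs.get(nums.id)   // Some(nums), keyed by RDD id
 * }}}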
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
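 *
 * A usage sketch (the jar path is illustrative):
 * {{{
 *   sc.addJar("hdfs:///deps/my-udfs.jar")
 *   sc.listJars().foreach(println)   // includes the path registered above
 * }}}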
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
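 *
 * For example (a sketch), a listener or plugin that needs to abort the application would call
 * {{{
 *   sc.stopInNewThread()
 * }}}
 * rather than calling `stop()` directly from the listener bus thread, which is disallowed.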
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with an exit code that will be passed to the scheduler backend. * In client mode, the client side may call `SparkContext.stop()` to clean up but exit with * a code not equal to 0. This causes a resource scheduler such as `ApplicationMaster` to * exit with a success status even though the client side exited with a failure. Spark can call * this method to stop the SparkContext and pass the client side's exit code to the scheduler * backend. The scheduler backend then sends the exit code to the corresponding resource * scheduler so the two stay consistent. * * @param exitCode Exit code that will be passed to the scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use an atomic compare-and-set on `stopped` so that only one caller performs the shutdown. // The `stopped` flag is also checked elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even after // this `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If none of these is set, return None. 
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
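 *
 * A minimal sketch (the computation is illustrative):
 * {{{
 *   val rdd = sc.parallelize(1 to 100, 4)
 *   val action = sc.submitJob(
 *     rdd,
 *     (it: Iterator[Int]) => it.sum,
 *     Seq(0, 1),                                          // only the first two partitions
 *     (index: Int, sum: Int) => println(s"partition $index -> $sum"),
 *     ())
 * }}}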
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
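 *
 * For example (a sketch; the listener class name is illustrative), extra listeners are
 * supplied through configuration before the context starts:
 * {{{
 *   new SparkConf().set("spark.extraListeners", "com.example.MyListener")
 * }}}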
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s" The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConf` that will be used for initialization of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). 
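 *
 * A minimal sketch:
 * {{{
 *   // Returns the running SparkContext if one exists; otherwise creates one from
 *   // system properties (e.g. when launched via spark-submit).
 *   val sc = SparkContext.getOrCreate()
 * }}}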
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
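 *
 * For example (a sketch; `MyJob` is an illustrative user class):
 * {{{
 *   val conf = new SparkConf().setAppName("demo")
 *   SparkContext.jarOfClass(classOf[MyJob]).foreach(jar => conf.setJars(Seq(jar)))
 * }}}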
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
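 *
 * For example, these are master strings the regexes below are meant to match
 * (host names and sizes are illustrative):
 * {{{
 *   local[4]                      // LOCAL_N_REGEX
 *   local[4, 2]                   // LOCAL_N_FAILURES_REGEX
 *   local-cluster[2, 1, 1024]     // LOCAL_CLUSTER_REGEX
 *   spark://host:7077             // SPARK_REGEX
 *   k8s://https://host:6443       // KUBERNETES_REGEX
 * }}}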
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
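  // A usage sketch (the path is illustrative): with these converters in implicit scope,
  //   sc.sequenceFile[Int, String]("hdfs:///data/pairs")
  // reads IntWritable keys and Text values and exposes them as Int and String.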
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
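 *
 * For example (a sketch; the output path is illustrative), with these factories in
 * implicit scope an `RDD[(Int, String)]` can be written directly:
 * {{{
 *   sc.parallelize(Seq((1, "a"), (2, "b"))).saveAsSequenceFile("hdfs:///out/pairs")
 *   // Int -> IntWritable and String -> Text via the implicit WritableFactory instances
 * }}}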
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
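 * A small usage sketch (the key read here is just an example; because `getConf` returns a
 * defensive copy, mutating the result does not affect the running context):
 * {{{
 *   val masterUrl = sc.getConf.get("spark.master")
 * }}}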
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // those of the child threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible.
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
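 * A minimal usage sketch (the tag name is illustrative only):
 * {{{
 *   sc.addJobTag("nightly-etl")
 *   sc.parallelize(1 to 100).count()   // this job carries the "nightly-etl" tag
 *   sc.removeJobTag("nightly-etl")
 * }}}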
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; large files are also allowed, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input.
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
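 * For instance, a defensive copy before caching might look like this (a sketch; the path is
 * only an example):
 * {{{
 *   val raw = sc.hadoopFile("/data/events", classOf[TextInputFormat],
 *     classOf[LongWritable], classOf[Text])
 *   val safe = raw.map { case (k, v) => (k.get(), v.toString) }   // copy out of the reused Writables
 *   safe.cache()
 * }}}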
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newApiHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that user * don't need to pass them directly. Instead, callers can just write, for example: * ``` * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * ``` * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast. 
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
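 * For example (a sketch; `rdd1` and `rdd2` stand for any RDDs of the same element type):
 * {{{
 *   val combined = sc.union(Seq(rdd1, rdd2))
 *   // equivalent variadic form: sc.union(rdd1, rdd2)
 * }}}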
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
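 *
 * For example, the public `broadcast` variant is typically used like this (an illustrative
 * sketch, assuming an existing `SparkContext` named `sc`):
 * {{{
 *   val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
 *   sc.parallelize(Seq("a", "b", "c")).map(k => lookup.value.getOrElse(k, 0)).collect()
 * }}}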
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
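 *
 * For example (an illustrative sketch; the path is a placeholder):
 * {{{
 *   sc.addFile("hdfs://namenode:8020/data/lookup.txt")
 *   // Inside tasks (or on the driver), resolve the locally downloaded copy:
 *   val local = org.apache.spark.SparkFiles.get("lookup.txt")
 * }}}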
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
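 *
 * For example (an illustrative sketch, assuming `org.apache.spark.scheduler._` is imported):
 * {{{
 *   sc.addSparkListener(new SparkListener {
 *     override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
 *       println(s"Job ${jobEnd.jobId} finished: ${jobEnd.jobResult}")
 *   })
 * }}}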
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be concurrent launched based on the ResourceProfile * could be used, even if some of them are being used at the moment. * Note that please don't cache the value returned by this method, because the number can change * due to add/remove executors. * * @param rp ResourceProfile which to use to calculate max concurrent tasks. * @return The max number of tasks that can be concurrent launched currently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have a locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId-> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. 
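 *
 * For example (an illustrative sketch; the executor IDs are placeholders that would normally
 * come from the Spark UI or a listener, and dynamic allocation must be disabled):
 * {{{
 *   sc.killExecutors(Seq("1", "2"))
 * }}}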
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
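 *
 * For example (an illustrative sketch, assuming an existing `SparkContext` named `sc`):
 * {{{
 *   val nums = sc.parallelize(1 to 100).cache()
 *   nums.count()
 *   sc.getPersistentRDDs.foreach { case (id, rdd) => println(s"RDD $id: ${rdd.getStorageLevel}") }
 * }}}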
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
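 *
 * For example (an illustrative sketch; the paths are placeholders):
 * {{{
 *   sc.addJar("/opt/app/lib/my-udfs.jar")                  // file on the driver
 *   sc.addJar("hdfs://namenode:8020/jars/my-udfs.jar")     // Hadoop-supported filesystem
 *   sc.listJars().foreach(println)
 * }}}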
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with exit code that will passed to scheduler backend. * In client mode, client side may call `SparkContext.stop()` to clean up but exit with * code not equal to 0. This behavior cause resource scheduler such as `ApplicationMaster` * exit with success status but client side exited with failed status. Spark can call * this method to stop SparkContext and pass client side correct exit code to scheduler backend. * Then scheduler backend should send the exit code to corresponding resource scheduler * to keep consistent. * * @param exitCode Specified exit code that will passed to scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If neither of these is set, return None. 
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
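 *
 * For example (an illustrative sketch that sums the first two partitions asynchronously):
 * {{{
 *   val rdd = sc.parallelize(1 to 100, 4)
 *   val action = sc.submitJob(
 *     rdd,
 *     (it: Iterator[Int]) => it.sum,
 *     Seq(0, 1),
 *     (index: Int, partSum: Int) => println(s"partition $index -> $partSum"),
 *     ())
 * }}}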
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
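 *
 * For example (illustrative; the listener class name is a placeholder), such listeners are
 * usually supplied via configuration rather than in code:
 * {{{
 *   spark-submit --conf spark.extraListeners=com.example.MyAppListener ...
 * }}}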
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConfig` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). 
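 *
 * For example (an illustrative sketch):
 * {{{
 *   val conf = new SparkConf().setAppName("example").setMaster("local[2]")
 *   val sc = SparkContext.getOrCreate(conf)
 *   // A later call with no conf returns the same active context:
 *   assert(SparkContext.getOrCreate() eq sc)
 * }}}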
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
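 *
 * For example (an illustrative sketch):
 * {{{
 *   val appJar = SparkContext.jarOfClass(this.getClass)   // e.g. Some("/path/to/app.jar")
 *   val conf = new SparkConf().setJars(appJar.toSeq)
 * }}}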
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
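        // For example (illustrative), a master URL of "local-cluster[2, 1, 1024]" starts two
        // worker processes with one core and 1024 MiB of memory each, which is useful for
        // exercising standalone-mode code paths from a single JVM in tests.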
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
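 *
 * For example (illustrative), typical master strings matched below include "local[4]",
 * "local[4, 2]", "spark://host:7077", "local-cluster[2, 1, 1024]" and "k8s://https://host:443".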
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
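 // A minimal usage sketch (hypothetical path): these implicit converter functions are what let
 // callers read a SequenceFile with plain Scala types instead of Writable types, e.g.
 //   val counts = sc.sequenceFile[String, Int]("/tmp/word-counts")
 // where the String and Int converters declared below are resolved implicitly by
 // SparkContext.sequenceFile.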
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
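 * For example (hypothetical output path), the implicit factories below are what allow
 * {{{
 * sc.parallelize(Seq(("a", 1), ("b", 2))).saveAsSequenceFile("/tmp/pairs")
 * }}}
 * to resolve `Text` for the `String` keys and `IntWritable` for the `Int` values.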
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
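 // For reference (bucket name is hypothetical): a user only needs to set
 //   spark.hadoop.fs.s3a.bucket.my-bucket.committer.magic.enabled=true
 // and the helper called below fills in the remaining S3A magic committer and commit protocol
 // settings via setIfMissing.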
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
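 // Sketch (plugin class name is hypothetical): driver/executor plugins, typically declared via
 //   spark.plugins=com.example.MyPlugin
 // are instantiated here, before the task scheduler is created.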
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
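 * For example (tag name is hypothetical):
 * {{{
 * sc.addJobTag("nightly-etl")
 * sc.getJobTags()               // Set("nightly-etl")
 * sc.removeJobTag("nightly-etl")
 * }}}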
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. 
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
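 * For example (path is hypothetical):
 * {{{
 * val lines = sc.hadoopFile("/data/events", classOf[TextInputFormat],
 *   classOf[LongWritable], classOf[Text]).map(pair => pair._2.toString)
 * }}}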
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newApiHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that user * don't need to pass them directly. Instead, callers can just write, for example: * ``` * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * ``` * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast. 
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
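 *
 * A minimal sketch (assumes an active `SparkContext` named `sc`):
 * {{{
 * val a = sc.parallelize(1 to 3)
 * val b = sc.parallelize(4 to 6)
 * val all = sc.union(Seq(a, b))   // equivalently: sc.union(a, b)
 * all.collect()                   // Array(1, 2, 3, 4, 5, 6)
 * }}}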
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
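 *
 * Usage of the public `broadcast` variant, as a sketch (assumes an active `SparkContext` `sc`):
 * {{{
 * val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
 * val keys = sc.parallelize(Seq("a", "b", "c"))
 * // Executors read the broadcast value through `.value`; it must not be mutated.
 * keys.map(k => lookup.value.getOrElse(k, 0)).collect()   // Array(1, 2, 0)
 * }}}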
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
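 *
 * A short sketch (assumes an active `SparkContext` `sc`; the path is hypothetical):
 * {{{
 * sc.addFile("hdfs:///config/lookup.txt")
 * sc.parallelize(1 to 2).foreach { _ =>
 *   // On executors, resolve the locally downloaded copy via SparkFiles.
 *   val local = org.apache.spark.SparkFiles.get("lookup.txt")
 *   println(scala.io.Source.fromFile(local).getLines().size)
 * }
 * }}}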
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
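 *
 * A minimal sketch (assumes an active `SparkContext` `sc`):
 * {{{
 * import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}
 *
 * sc.addSparkListener(new SparkListener {
 *   override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
 *     println(s"Job ${jobEnd.jobId} finished with result ${jobEnd.jobResult}")
 * })
 * }}}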
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be launched concurrently based on the resources that * the given ResourceProfile could use, even if some of them are being used at the moment. * Note that you should not cache the value returned by this method, because the number can * change when executors are added or removed. * * @param rp ResourceProfile to use to calculate max concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * `SparkContext#requestExecutors`. * * @return whether the request is received.
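 *
 * A sketch of manual executor management (assumes an active `SparkContext` `sc` on a cluster
 * manager that supports it, with dynamic allocation disabled; executor IDs are illustrative):
 * {{{
 * sc.requestExecutors(2)             // ask the cluster manager for two more executors
 * sc.killExecutors(Seq("3", "7"))    // later, release specific executors
 * }}}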
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
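 *
 * For example (assumes an active `SparkContext` `sc`):
 * {{{
 * val nums = sc.parallelize(1 to 100).cache()
 * sc.getPersistentRDDs.contains(nums.id)   // true: the RDD is registered as persistent
 * nums.unpersist()
 * }}}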
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
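 *
 * Illustrative calls (paths and coordinates are hypothetical):
 * {{{
 * sc.addJar("/opt/libs/my-udfs.jar")                // local file on the driver
 * sc.addJar("hdfs:///libs/shared.jar")              // Hadoop-supported filesystem
 * sc.addJar("local:/opt/libs/preinstalled.jar")     // already present on every worker
 * sc.addJar("ivy://org.example:example-lib:1.0.0")  // resolve via Ivy/Maven coordinates
 * }}}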
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with exit code that will passed to scheduler backend. * In client mode, client side may call `SparkContext.stop()` to clean up but exit with * code not equal to 0. This behavior cause resource scheduler such as `ApplicationMaster` * exit with success status but client side exited with failed status. Spark can call * this method to stop SparkContext and pass client side correct exit code to scheduler backend. * Then scheduler backend should send the exit code to corresponding resource scheduler * to keep consistent. * * @param exitCode Specified exit code that will passed to scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If neither of these is set, return None. 
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
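 *
 * A non-blocking sketch (assumes an active `SparkContext` `sc`):
 * {{{
 * import java.util.concurrent.atomic.AtomicLong
 *
 * val data = sc.parallelize(1 to 1000, 4)
 * val total = new AtomicLong(0)
 * val future = sc.submitJob[Int, Long, Long](
 *   data,
 *   (it: Iterator[Int]) => it.map(_.toLong).sum,                     // per-partition work
 *   data.partitions.indices,                                         // partitions to run on
 *   (index: Int, partSum: Long) => { total.addAndGet(partSum); () }, // driver-side handler
 *   total.get())                                                     // evaluated once the job completes
 * // `future` is a SimpleFutureAction; the job runs asynchronously on the cluster.
 * }}}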
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
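 *
 * The extra listeners are supplied through user configuration, e.g. (listener class hypothetical):
 * {{{
 * val conf = new SparkConf()
 *   .setAppName("app")
 *   .set("spark.extraListeners", "com.example.MyListener")
 * }}}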
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConfig` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). 
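 *
 * For example:
 * {{{
 * // Reuse the running SparkContext if one exists; otherwise create one from a default SparkConf.
 * val sc = SparkContext.getOrCreate()
 * }}}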
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
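 *
 * A typical sketch in a driver program:
 * {{{
 * val conf = new SparkConf()
 *   .setAppName("app")
 *   .setJars(SparkContext.jarOfClass(this.getClass).toSeq)
 * }}}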
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
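 *
 * For illustration, the master string formats recognized by these regexes look like
 * (all values below are examples only):
 * {{{
 *   local[4]                    // LOCAL_N_REGEX: four local threads (local[*] = all cores)
 *   local[4, 2]                 // LOCAL_N_FAILURES_REGEX: four threads, up to two task failures
 *   local-cluster[2, 1, 1024]   // LOCAL_CLUSTER_REGEX: 2 workers, 1 core and 1024 MiB each
 *   spark://host:7077           // SPARK_REGEX: standalone cluster
 *   k8s://https://host:443      // KUBERNETES_REGEX: Kubernetes cluster
 * }}}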
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
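  // Illustrative sketch (the path and element types are hypothetical): these implicit
  // converter functions are what allow a SequenceFile to be read back as plain Scala types
  // through SparkContext, e.g.
  //   val pairs: RDD[(Int, String)] = sc.sequenceFile[Int, String]("/tmp/example-seqfile")
  // where the Int and String converters are resolved from the implicits defined below.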
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
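 *
 * As an illustrative sketch (the output path is hypothetical), the implicit factories in the
 * companion object below are what let a pair RDD of plain Scala types be written out as a
 * SequenceFile:
 * {{{
 *   sc.parallelize(Seq((1, "one"), (2, "two"))).saveAsSequenceFile("/tmp/seq-out")
 * }}}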
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
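 *
 * For example (illustrative): the returned copy can be inspected freely, but setting values on
 * it does not affect the running context:
 * {{{
 *   val snapshot = sc.getConf
 *   val appName = snapshot.get("spark.app.name")
 * }}}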
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. 
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
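 *
 * A minimal illustrative call (the input path is hypothetical); this is essentially what
 * `textFile` does under the hood:
 * {{{
 *   val lines = sc.hadoopFile("/tmp/input", classOf[TextInputFormat],
 *     classOf[LongWritable], classOf[Text], minPartitions = 2).map(_._2.toString)
 * }}}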
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newAPIHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that users * don't need to pass them directly. Instead, callers can just write, for example: * {{{ * val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
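 *
 * For example, a sketch with illustrative inputs:
 * {{{
 *   val parts = Seq(sparkContext.parallelize(1 to 3), sparkContext.parallelize(4 to 6))
 *   val all = sparkContext.union(parts)   // contains 1, 2, 3, 4, 5, 6
 * }}}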
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or an exception will be thrown. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with the given name. * * @note Accumulators must be registered before use, or an exception will be thrown. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with an empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with an empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once.
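 *
 * Caller-side sketch through the public `broadcast` wrapper above (names are illustrative):
 * {{{
 *   val lookup = sparkContext.broadcast(Map(1 -> "a", 2 -> "b"))
 *   rdd.map(id => lookup.value.getOrElse(id, "unknown"))
 * }}}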
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
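 *
 * A usage sketch (the path and file name are illustrative):
 * {{{
 *   sparkContext.addFile("hdfs://nn:8020/config/lookup.txt")
 *   rdd.map { x =>
 *     val localPath = org.apache.spark.SparkFiles.get("lookup.txt")
 *     // open `localPath` on the executor as needed
 *     x
 *   }
 * }}}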
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
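 *
 * A minimal sketch of registering a listener (the listener body is illustrative):
 * {{{
 *   import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}
 *
 *   sparkContext.addSparkListener(new SparkListener {
 *     override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
 *       println(s"Job ${jobEnd.jobId} finished: ${jobEnd.jobResult}")
 *   })
 * }}}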
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be launched concurrently based on the given * ResourceProfile, counting every task slot that could be used, even if some of them are * in use at the moment. * Note: do not cache the value returned by this method, because the number can change when * executors are added or removed. * * @param rp ResourceProfile to use when calculating the max concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received.
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
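 *
 * For example (a sketch):
 * {{{
 *   sparkContext.getPersistentRDDs.foreach { case (id, rdd) =>
 *     println(s"RDD $id is marked persistent at level ${rdd.getStorageLevel}")
 *   }
 * }}}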
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
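 *
 * For example (illustrative paths):
 * {{{
 *   sparkContext.addJar("/opt/libs/my-udfs.jar")                  // file on the driver
 *   sparkContext.addJar("hdfs://nn:8020/libs/shared-deps.jar")    // Hadoop-supported filesystem
 * }}}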
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with an exit code that will be passed to the scheduler backend. * In client mode, the client side may call `SparkContext.stop()` to clean up but exit with * a code not equal to 0. This behavior causes a resource scheduler such as `ApplicationMaster` * to exit with a success status while the client side exited with a failed status. Spark can call * this method to stop the SparkContext and pass the client side's exit code to the scheduler * backend. The scheduler backend should then send the exit code to the corresponding resource * scheduler to keep them consistent. * * @param exitCode Exit code that will be passed to the scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even after // this `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If none of these is set, return None.
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
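 *
 * A usage sketch (assumes an `RDD[Int]` named `rdd`; everything else is illustrative):
 * {{{
 *   val future = sparkContext.submitJob(
 *     rdd,
 *     (it: Iterator[Int]) => it.sum,
 *     rdd.partitions.indices,
 *     (index: Int, partial: Int) => println(s"partition $index summed to $partial"),
 *     ())
 *   // `future` completes once every requested partition has been processed.
 * }}}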
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
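 *
 * For example, extra listeners can be supplied through configuration before the context starts
 * (the class name is illustrative; such a class typically needs a zero-arg or `SparkConf`
 * constructor):
 * {{{
 *   new SparkConf().set("spark.extraListeners", "com.example.MyListener")
 * }}}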
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s" The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConf` that will be used for initialization of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving).
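 *
 * For example (a sketch):
 * {{{
 *   // Reuses the active SparkContext if one exists, otherwise creates one from default config.
 *   val sc = SparkContext.getOrCreate()
 * }}}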
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
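/* Illustrative sketch (not part of the original source): using the getOrCreate() variants above,
 * assuming a local master. The second call returns the already-registered singleton; settings in
 * a new conf passed afterwards are ignored (only a warning is logged).
 * {{{
 *   val conf = new SparkConf().setMaster("local[2]").setAppName("get-or-create-demo")
 *   val sc = SparkContext.getOrCreate(conf)
 *   assert(SparkContext.getOrCreate() eq sc)
 *   sc.stop()
 * }}}
 */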
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
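/* Illustrative sketch (not part of the original source): the typical use of jarOfClass /
 * jarOfObject above is shipping the application jar to the cluster when building a conf by hand.
 * {{{
 *   val appJar: Option[String] = SparkContext.jarOfObject(this)   // or jarOfClass(getClass)
 *   val conf = new SparkConf()
 *     .setMaster("spark://host:7077")
 *     .setAppName("jar-of-object-demo")
 *     .setJars(appJar.toSeq)
 * }}}
 */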
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
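/* Illustrative expectations (not part of the original source) for the master patterns handled in
 * createTaskScheduler above, assuming default configs:
 * {{{
 *   "local"                    -> LocalSchedulerBackend with a single thread
 *   "local[8]"                 -> LocalSchedulerBackend with 8 threads, 1 allowed task failure
 *   "local[4, 2]"              -> LocalSchedulerBackend with 4 threads, maxTaskFailures = 2
 *   "spark://host:7077"        -> StandaloneSchedulerBackend against that master
 *   "local-cluster[2,1,1024]"  -> in-process LocalSparkCluster: 2 workers, 1 core, 1024 MiB each
 * }}}
 */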
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
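/* Illustrative sketch (not part of the original source): the effect of
 * fillMissingMagicCommitterConfsIfNeeded above. Enabling the magic committer for one bucket fills
 * in the remaining committer settings unless the user set them explicitly ("mybucket" is a
 * placeholder bucket name).
 * {{{
 *   val conf = new SparkConf()
 *     .set("spark.hadoop.fs.s3a.bucket.mybucket.committer.magic.enabled", "true")
 *   // after SparkContext initialization, the following are set if missing:
 *   //   spark.hadoop.fs.s3a.committer.magic.enabled = true
 *   //   spark.hadoop.fs.s3a.committer.name          = magic
 *   //   spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a =
 *   //     org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory
 *   //   spark.sql.parquet.output.committer.class    =
 *   //     org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter
 *   //   spark.sql.sources.commitProtocolClass       =
 *   //     org.apache.spark.internal.io.cloud.PathOutputCommitProtocol
 * }}}
 */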
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
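/* Illustrative sketch (not part of the original source): strings matched by the regexes in
 * SparkMasterRegex above (the object is package-private, so this is for reading the patterns
 * rather than for user code).
 * {{{
 *   "local[4]"                match { case LOCAL_N_REGEX(n) => n }                  // "4"
 *   "local[*]"                match { case LOCAL_N_REGEX(n) => n }                  // "*"
 *   "local[4, 2]"             match { case LOCAL_N_FAILURES_REGEX(n, f) => (n, f) } // ("4", "2")
 *   "local-cluster[2,1,1024]" match { case LOCAL_CLUSTER_REGEX(w, c, m) => (w, c, m) }
 *   "spark://host:7077"       match { case SPARK_REGEX(url) => url }                // "host:7077"
 *   "k8s://https://host"      match { case KUBERNETES_REGEX(url) => url }           // "https://host"
 * }}}
 */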
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
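/* Illustrative sketch (not part of the original source): what these converters do, and where they
 * are resolved implicitly ("hdfs://.../ints-and-names" is a placeholder path).
 * {{{
 *   WritableConverter.intWritableConverter().convert(new IntWritable(42))   // 42
 *   WritableConverter.stringWritableConverter().convert(new Text("spark"))  // "spark"
 *   // SparkContext.sequenceFile[K, V] picks up the converters implicitly:
 *   val rdd: RDD[(Int, String)] = sc.sequenceFile[Int, String]("hdfs://.../ints-and-names")
 * }}}
 */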
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
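/* Illustrative sketch (not part of the original source): the constructor of the class below is
 * normally driven through a SparkConf, and the context must be stopped before another one may be
 * created in the same JVM.
 * {{{
 *   val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("ctor-demo"))
 *   try {
 *     sc.parallelize(1 to 10).count()
 *   } finally {
 *     sc.stop()
 *   }
 * }}}
 */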
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
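/* Illustrative sketch (not part of the original source): adjusting the driver log level at
 * runtime with setLogLevel above; lowercase input is accepted and upper-cased.
 * {{{
 *   sc.setLogLevel("warn")   // same as "WARN"; must be one of SparkContext.VALID_LOG_LEVELS
 * }}}
 */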
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
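/* Illustrative sketch (not part of the original source): jars, files and archives handed to the
 * constructor are registered through the same addJar/addFile path that user code can call
 * directly ("dep.jar" and "lookup.csv" are placeholder names).
 * {{{
 *   sc.addJar("/path/to/dep.jar")
 *   sc.addFile("/path/to/lookup.csv")
 *   sc.parallelize(1 to 4).map { _ =>
 *     org.apache.spark.SparkFiles.get("lookup.csv")   // local path of the file on each executor
 *   }.collect()
 * }}}
 */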
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
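/* Illustrative sketch (not part of the original source): the ExecutorAllocationManager created
 * below is only active when dynamic allocation is enabled, e.g. with a conf along these lines
 * (exact requirements depend on the cluster manager and how shuffle data is preserved):
 * {{{
 *   val conf = new SparkConf()
 *     .set("spark.dynamicAllocation.enabled", "true")
 *     .set("spark.dynamicAllocation.minExecutors", "1")
 *     .set("spark.dynamicAllocation.maxExecutors", "20")
 *     .set("spark.shuffle.service.enabled", "true")   // or another way to keep shuffle files
 * }}}
 */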
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
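/* Illustrative sketch (not part of the original source): thread-local properties set through
 * setLocalProperty below, e.g. routing this thread's jobs to a fair-scheduler pool named "etl"
 * (a placeholder pool name).
 * {{{
 *   sc.setLocalProperty("spark.scheduler.pool", "etl")
 *   assert(sc.getLocalProperty("spark.scheduler.pool") == "etl")
 *   sc.setLocalProperty("spark.scheduler.pool", null)   // removes the property
 * }}}
 */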
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
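/* Illustrative sketch (not part of the original source): the job tag API added in 3.5, paired
 * with cancelJobsWithTag ("nightly-report" is a placeholder tag).
 * {{{
 *   sc.addJobTag("nightly-report")
 *   assert(sc.getJobTags().contains("nightly-report"))
 *   // ... submit actions from this thread ...
 *   // from another thread:
 *   sc.cancelJobsWithTag("nightly-report")
 *   sc.removeJobTag("nightly-report")
 * }}}
 */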
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
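/* Illustrative sketch (not part of the original source): the RDD-creation methods above
 * ("hdfs://.../logs" is a placeholder path).
 * {{{
 *   sc.parallelize(Seq(1, 2, 3, 4), numSlices = 2).sum()      // 10.0
 *   sc.range(0L, 10L, step = 2).collect()                     // Array(0, 2, 4, 6, 8)
 *   sc.makeRDD(Seq(("a", Seq("host1")), ("b", Seq("host2")))) // one partition per element
 *   sc.textFile("hdfs://.../logs/*.log").count()
 * }}}
 */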
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file and the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; large files are also allowed, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggested minimum number of partitions for the input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggested minimum number of partitions for the input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input.
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
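* * For example (a minimal usage sketch; assumes an active SparkContext `sc` and a hypothetical HDFS path): * {{{ * import org.apache.hadoop.io.{LongWritable, Text} * import org.apache.hadoop.mapred.TextInputFormat * * val records = sc.hadoopFile("hdfs://some-path/logs", classOf[TextInputFormat], * classOf[LongWritable], classOf[Text], minPartitions = 8) * // copy values out of the reused Writable before caching or shuffling * val lines = records.map { case (_, text) => text.toString } * }}}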
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newAPIHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that users * don't need to pass them directly. Instead, callers can just write, for example: * {{{ * val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
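* * For example (a minimal sketch; assumes an active SparkContext `sc`): * {{{ * val a = sc.parallelize(1 to 3) * val b = sc.parallelize(4 to 6) * sc.union(Seq(a, b)).collect() // Array(1, 2, 3, 4, 5, 6) * }}}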
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
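* * Typical usage of the public `broadcast` counterpart (a minimal sketch; assumes an active SparkContext `sc`): * {{{ * val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2)) * sc.parallelize(Seq("a", "b", "c")).map(k => lookup.value.getOrElse(k, 0)).collect() * }}}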
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
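* * For example (a minimal sketch; assumes an active SparkContext `sc` and a hypothetical local path): * {{{ * import org.apache.spark.SparkFiles * * sc.addFile("/tmp/lookup.txt") * sc.parallelize(1 to 2).map { _ => new java.io.File(SparkFiles.get("lookup.txt")).length }.collect() * }}}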
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
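* * For example (a minimal sketch; assumes an active SparkContext `sc`): * {{{ * import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd} * * sc.addSparkListener(new SparkListener { * override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = * println(s"Job ${jobEnd.jobId} finished with result ${jobEnd.jobResult}") * }) * }}}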
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be launched concurrently based on the resources that * the given ResourceProfile could use, even if some of them are being used at the moment. * Note that the value returned by this method should not be cached, because the number can * change as executors are added or removed. * * @param rp The ResourceProfile to use when calculating max concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // This is being applied to the default resource profile; we would need to add an API // to support others. val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors), immutable.Map(defaultProfId -> localityAwareTasks), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received.
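* * For example (a minimal sketch; the executor IDs are hypothetical and would normally be obtained from the cluster manager, e.g. via `SparkListenerExecutorAdded` events): * {{{ * val acknowledged = sc.killExecutors(Seq("1", "2")) * }}}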
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
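* * For example (a minimal sketch; assumes an active SparkContext `sc`): * {{{ * val cached = sc.parallelize(1 to 100).cache() * cached.count() * sc.getPersistentRDDs.contains(cached.id) // true * }}}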
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
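* * For example (a minimal sketch; assumes an active SparkContext `sc` and a hypothetical jar path): * {{{ * sc.addJar("/path/to/my-udfs.jar") * sc.listJars() // includes the URI under which my-udfs.jar is now served * }}}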
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with an exit code that will be passed to the scheduler backend. * In client mode, the client side may call `SparkContext.stop()` to clean up but then exit with * a code not equal to 0. This behavior causes a resource scheduler such as `ApplicationMaster` * to exit with a success status even though the client side exited with a failed status. Spark can * call this method to stop the SparkContext and pass the client side's correct exit code to the * scheduler backend. The scheduler backend should then send the exit code to the corresponding * resource scheduler to keep the two consistent. * * @param exitCode Exit code that will be passed to the scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even after // this `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If none of these is set, return None.
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
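* * For example (a minimal sketch; assumes an active SparkContext `sc`): * {{{ * import scala.concurrent.Await * import scala.concurrent.duration._ * * val rdd = sc.parallelize(1 to 100, 4) * val sums = Array.fill(4)(0L) * val future = sc.submitJob( * rdd, * (it: Iterator[Int]) => it.map(_.toLong).sum, * Seq(0, 1, 2, 3), * (index: Int, partitionSum: Long) => sums(index) = partitionSum, * sums.sum) * Await.result(future, 1.minute) // sum across all partitions * }}}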
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
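* * For example (a minimal sketch; `com.example.MyListener` is a hypothetical listener class), user-supplied listeners are registered through configuration before the context starts: * {{{ * val conf = new SparkConf().set("spark.extraListeners", "com.example.MyListener") * }}}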
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConfig` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). 
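 *
 * For example (an illustrative usage sketch, not part of the original comment):
 * {{{
 *   val sc = SparkContext.getOrCreate()        // returns the active context, or creates one
 *   assert(SparkContext.getOrCreate() eq sc)   // later calls return the same instance
 * }}}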
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
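 *
 * For example (an illustrative sketch, not part of the original comment):
 * {{{
 *   val conf = new SparkConf()
 *     .setAppName("jar-of-class-demo")
 *     .setJars(SparkContext.jarOfClass(getClass).toSeq)   // ship the jar containing this class
 * }}}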
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
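 *
 * For example (an illustrative sketch, not part of the original comment), `LOCAL_N_REGEX`
 * extracts the thread count from a local master string:
 * {{{
 *   "local[4]" match {
 *     case SparkMasterRegex.LOCAL_N_REGEX(threads) => threads   // "4"
 *   }
 * }}}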
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
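  // For example (an illustrative sketch, not part of the original comments), the implicits below
  // are what let callers read a SequenceFile without naming Writable types explicitly:
  //
  //   val counts: RDD[(String, Int)] = sc.sequenceFile[String, Int]("hdfs:///counts")
  //
  // where `stringWritableConverterFn` and `intWritableConverterFn` supply the required
  // `WritableConverter` arguments.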
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
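 *
 * For example (an illustrative sketch, not part of the original comment), the factory for `Int`
 * pairs `IntWritable` with the conversion function:
 * {{{
 *   new WritableFactory[Int](_ => classOf[IntWritable], new IntWritable(_))
 * }}}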
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
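 *
 * For example (an illustrative sketch, not part of the original comment), the public variants of
 * these constructors are typically invoked as:
 * {{{
 *   val sc = new SparkContext("local[2]", "constructor-demo", new SparkConf())
 * }}}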
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
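 *
 * For example (an illustrative note, not part of the original comment), mutating the returned
 * copy does not affect the running context; `spark.example.key` below is a hypothetical setting:
 * {{{
 *   val copy = sc.getConf
 *   copy.set("spark.example.key", "value")   // changes the copy only, not sc's configuration
 * }}}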
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard Java * implementation of thread pools has worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human-readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character.
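 *
 * A minimal usage sketch (the tag name and workload below are illustrative, not part of the
 * API's contract):
 * {{{
 *   // In the thread that submits the work:
 *   sc.addJobTag("nightly-report")
 *   sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count()
 *
 *   // In a separate thread, cancel every job carrying the tag:
 *   sc.cancelJobsWithTag("nightly-report")
 * }}}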
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file and the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; large files are also allowed, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input.
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
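 * As a sketch of that copy-before-cache pattern (the `path` and `minPartitions` values here are
 * placeholders):
 * {{{
 *   val raw = sc.hadoopFile(path, classOf[TextInputFormat],
 *     classOf[LongWritable], classOf[Text], minPartitions)
 *   // Convert the reused Writables into immutable values before caching.
 *   val cached = raw.map { case (offset, line) => (offset.get, line.toString) }.cache()
 * }}}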
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newAPIHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that users * don't need to pass them directly. Instead, callers can just write, for example: * {{{ * val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
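 *
 * For example (a sketch; `rdd1` and `rdd2` stand for any RDDs of the same element type):
 * {{{
 *   val combined = sc.union(Seq(rdd1, rdd2))
 * }}}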
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
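 * The public `broadcast` method above is the user-facing entry point; an illustrative sketch:
 * {{{
 *   val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
 *   sc.parallelize(Seq("a", "b", "c")).map(k => lookup.value.getOrElse(k, 0)).collect()
 * }}}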
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
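 *
 * An illustrative sketch (the file name and path are hypothetical):
 * {{{
 *   sc.addFile("hdfs://nn/config/lookup.json")
 *   sc.parallelize(1 to 4).foreach { _ =>
 *     // On executors, resolve the locally downloaded copy via SparkFiles.
 *     println(org.apache.spark.SparkFiles.get("lookup.json"))
 *   }
 * }}}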
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
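 *
 * A minimal sketch of registering a listener (the listener body is illustrative):
 * {{{
 *   sc.addSparkListener(new SparkListener {
 *     override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
 *       println(s"Job ${jobEnd.jobId} finished: ${jobEnd.jobResult}")
 *   })
 * }}}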
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be launched concurrently based on the given * ResourceProfile, counting resources that could be used even if some of them are busy at the * moment. * Note: don't cache the value returned by this method, because the number can change as * executors are added or removed. * * @param rp ResourceProfile to use to calculate max concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId-> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received.
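 *
 * For example (the executor ids are illustrative; this is only valid when dynamic allocation is
 * disabled, as enforced in the method body below):
 * {{{
 *   sc.killExecutors(Seq("3", "4"))
 * }}}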
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
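 *
 * For example (a sketch):
 * {{{
 *   val nums = sc.parallelize(1 to 100).cache()
 *   sc.getPersistentRDDs.contains(nums.id)   // true: registered when cache() was called
 * }}}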
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
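 *
 * An illustrative sketch (all paths are hypothetical):
 * {{{
 *   sc.addJar("/opt/jobs/etl-helpers.jar")             // file local to the driver
 *   sc.addJar("hdfs://nn/shared/udfs.jar")             // fetched by executors from HDFS
 *   sc.addJar("local:/opt/spark/extra/native-io.jar")  // already present on every worker node
 * }}}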
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with an exit code that will be passed to the scheduler backend. * In client mode, the client side may call `SparkContext.stop()` to clean up but then exit with * a code not equal to 0. This behavior causes resource schedulers such as `ApplicationMaster` * to exit with a success status even though the client side exited with a failure status. Spark * can call this method to stop the SparkContext and pass the client side's correct exit code to * the scheduler backend. The scheduler backend should then send the exit code to the * corresponding resource scheduler to keep them consistent. * * @param exitCode Exit code that will be passed to the scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even after this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If none of these is set, return None.
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
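 * The job is submitted asynchronously; the returned action can be used to await or cancel it.
 *
 * A minimal illustrative sketch (not part of the original source), assuming an active context `sc`:
 * {{{
 *   val rdd = sc.parallelize(1 to 100, 4)
 *   var total = 0L
 *   val action = sc.submitJob[Int, Long, Long](
 *     rdd,
 *     it => it.map(_.toLong).sum,                    // processPartition: sum one partition
 *     Seq(0, 1, 2, 3),                               // partitions to run on
 *     (index, partialSum) => total += partialSum,    // resultHandler, called once per partition
 *     total)                                         // resultFunc, producing the final result
 * }}}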
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
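 *
 * Illustrative sketch (not part of the original source) of how such listeners are supplied;
 * `org.example.MyListener` is a hypothetical `SparkListener` subclass with a zero-arg or
 * single-`SparkConf` constructor available on the driver classpath:
 * {{{
 *   val conf = new SparkConf().set("spark.extraListeners", "org.example.MyListener")
 * }}}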
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConfig` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). 
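 *
 * Illustrative example (not part of the original source):
 * {{{
 *   // Reuse the context created by the surrounding environment (e.g. spark-submit),
 *   // or build one from system properties if no context is active yet.
 *   val sc = SparkContext.getOrCreate()
 * }}}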
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
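 *
 * Illustrative example (not part of the original source):
 * {{{
 *   val conf = new SparkConf()
 *     .setAppName("MyApp")
 *     .setMaster("spark://host:7077")
 *     .setJars(SparkContext.jarOfClass(this.getClass).toSeq)
 * }}}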
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
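 *
 * Illustrative examples (not part of the original source) of master strings these patterns match:
 * {{{
 *   "local[4]"                  // LOCAL_N_REGEX: 4 threads
 *   "local[4, 2]"               // LOCAL_N_FAILURES_REGEX: 4 threads, maxFailures = 2
 *   "local-cluster[2, 1, 1024]" // LOCAL_CLUSTER_REGEX: 2 workers, 1 core and 1024 MiB each
 *   "spark://host:7077"         // SPARK_REGEX: standalone cluster
 *   "k8s://https://host:443"    // KUBERNETES_REGEX: Kubernetes cluster
 * }}}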
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
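  // As an illustrative example (not part of the original source), these converters are what let a
  // caller read a SequenceFile without naming the Writable types explicitly:
  //   sc.sequenceFile[Int, String]("hdfs://nn/path")   // keys via IntWritable, values via Text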
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
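 *
 * Illustrative example (not part of the original source): with these factories in scope, a pair
 * RDD of primitives can be written directly as a Hadoop SequenceFile:
 * {{{
 *   val pairs = sc.parallelize(Seq(1 -> "a", 2 -> "b"))
 *   pairs.saveAsSequenceFile("hdfs://nn/path")   // Int -> IntWritable, String -> Text
 * }}}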
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
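 *
 * Illustrative example (not part of the original source; `spark.hypothetical.key` is a made-up
 * config key): mutating the returned copy does not affect the running context.
 * {{{
 *   val copy = sc.getConf
 *   copy.set("spark.hypothetical.key", "value")      // changes only the clone
 *   sc.getConf.contains("spark.hypothetical.key")    // false
 * }}}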
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a workaround // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some Spark ML algorithms call OpenBLAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized.
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id have been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The driver's metrics system needs spark.app.id to be set to the application ID, // so it should start after we get the app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After the application has started, attach handlers to the started server and start them. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it.
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard Java * implementations of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human-readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character.
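* * A minimal usage sketch (assuming a running SparkContext `sc`; the tag name is illustrative): * {{{ * sc.addJobTag("nightly_etl") * // jobs submitted from this thread now carry the tag * sc.parallelize(1 to 100).count() * sc.getJobTags() // Set("nightly_etl") * sc.removeJobTag("nightly_etl") * }}}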
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; large files are also allowed, but may cause poor performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input.
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
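* * A minimal sketch of reading plain text with the old MapReduce API (the input path is hypothetical): * {{{ * import org.apache.hadoop.io.{LongWritable, Text} * import org.apache.hadoop.mapred.TextInputFormat * * val lines = sc.hadoopFile("/data/events", classOf[TextInputFormat], * classOf[LongWritable], classOf[Text]).map(_._2.toString) * }}}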
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newAPIHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that users * don't need to pass them directly. Instead, callers can just write, for example: * {{{ * val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
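* * A minimal sketch (assuming two existing RDDs `rddA` and `rddB` with the same element type): * {{{ * val merged = sc.union(Seq(rddA, rddB)) * }}}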
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or they will throw an exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with a given name. * * @note Accumulators must be registered before use, or they will throw an exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with an empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with an empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once.
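* * For example, through the public `broadcast` method (a minimal sketch with illustrative data): * {{{ * val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2)) * sc.parallelize(Seq("a", "b", "a")).map(k => lookup.value.getOrElse(k, 0)).sum() * }}}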
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
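* * A minimal usage sketch (the HDFS URIs and file names are hypothetical): * {{{ * sc.addFile("hdfs://namenode:8020/config/lookup.csv") * sc.addFile("hdfs://namenode:8020/config/", recursive = true) * // on executors, resolve the local copy of an added file * sc.parallelize(1 to 4).map(_ => org.apache.spark.SparkFiles.get("lookup.csv")).collect() * }}}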
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
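* * A minimal sketch of a listener that reports application end (the listener body is illustrative): * {{{ * import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd} * * sc.addSparkListener(new SparkListener { * override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = * println(s"Application ended at ${applicationEnd.time}") * }) * }}}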
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be launched concurrently based on the ResourceProfile, * counting slots that could be used even if some of them are in use at the moment. * Note: don't cache the value returned by this method, because the number can change * as executors are added or removed. * * @param rp The ResourceProfile to use to calculate the max concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received.
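* * A minimal sketch (the executor IDs are hypothetical; dynamic allocation must be disabled): * {{{ * sc.killExecutors(Seq("1", "2")) * }}}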
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
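* * A minimal sketch (assuming a running SparkContext `sc`): * {{{ * val cached = sc.parallelize(1 to 10).cache() * sc.getPersistentRDDs.contains(cached.id) // true once the RDD is marked persistent * }}}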
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
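* * A minimal usage sketch (the jar paths are hypothetical): * {{{ * sc.addJar("/opt/jobs/lib/my-udfs.jar") // file that exists only on the driver * sc.addJar("hdfs://namenode:8020/jars/my-udfs.jar") // Hadoop-supported filesystem * }}}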
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
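 *
 * An illustrative, hypothetical internal usage (only code in the `org.apache.spark` package
 * can call this), e.g. from a component running on one of Spark's own threads that must not
 * block on `stop()` directly:
 *
 * {{{
 *   // `error` stands for a fatal Throwable caught by the component
 *   logError("Unrecoverable error, shutting down the SparkContext", error)
 *   sc.stopInNewThread()
 * }}}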
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with exit code that will passed to scheduler backend. * In client mode, client side may call `SparkContext.stop()` to clean up but exit with * code not equal to 0. This behavior cause resource scheduler such as `ApplicationMaster` * exit with success status but client side exited with failed status. Spark can call * this method to stop SparkContext and pass client side correct exit code to scheduler backend. * Then scheduler backend should send the exit code to corresponding resource scheduler * to keep consistent. * * @param exitCode Specified exit code that will passed to scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If neither of these is set, return None. 
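 *
 * For illustration of the precedence (the path is a placeholder): a value set on the
 * `SparkConf` wins over the environment variable:
 *
 * {{{
 *   val conf = new SparkConf().set("spark.home", "/opt/spark")
 *   // a context built from this conf reports Some("/opt/spark") here,
 *   // even if SPARK_HOME points elsewhere
 * }}}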
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
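 *
 * A minimal sketch (not from the original source) of submitting an asynchronous job over a
 * subset of partitions, assuming an active `sc: SparkContext`:
 *
 * {{{
 *   val rdd = sc.parallelize(1 to 100, numSlices = 4)
 *   val future = sc.submitJob(
 *     rdd,
 *     (it: Iterator[Int]) => it.sum,                    // runs on each selected partition
 *     Seq(0, 1),                                        // only the first two partitions
 *     (index: Int, sum: Int) => println(s"partition $index -> $sum"),
 *     ())                                               // resultFunc: nothing extra to return
 * }}}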
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
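 *
 * For reference, a sketch of the user-facing side of this mechanism; the listener class name
 * below is hypothetical and must be on the driver's classpath with a zero-argument (or
 * single-`SparkConf`-argument) constructor:
 *
 * {{{
 *   val conf = new SparkConf()
 *     .setMaster("local[2]")
 *     .setAppName("listener-demo")
 *     .set("spark.extraListeners", "com.example.MyMetricsListener")
 *   val sc = new SparkContext(conf)
 * }}}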
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConfig` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). 
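 *
 * A short sketch of both variants (configuration values are placeholders):
 *
 * {{{
 *   // create (or retrieve) a context using an explicit configuration
 *   val sc = SparkContext.getOrCreate(
 *     new SparkConf().setMaster("local[2]").setAppName("demo"))
 *
 *   // later, from another component, retrieve whatever context is already active
 *   val same = SparkContext.getOrCreate()
 * }}}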
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
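 *
 * A brief sketch of the intended use (`MyApp` stands in for the application's own main
 * class):
 *
 * {{{
 *   val conf = new SparkConf().setAppName("jar-demo")
 *   SparkContext.jarOfClass(classOf[MyApp]).foreach(jar => conf.setJars(Seq(jar)))
 * }}}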
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
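 *
 * For illustration, typical master strings handled by `SparkContext.createTaskScheduler`
 * (host names and sizes below are placeholders), most of them extracted with these patterns:
 *
 * {{{
 *   "local"                        // single thread (matched literally, not by a regex)
 *   "local[8]", "local[*]"         // LOCAL_N_REGEX: N threads, or one per available core
 *   "local[4, 2]"                  // LOCAL_N_FAILURES_REGEX: 4 threads, up to 2 task failures
 *   "local-cluster[2, 1, 1024]"    // LOCAL_CLUSTER_REGEX: 2 workers, 1 core, 1024 MiB each
 *   "spark://host:7077"            // SPARK_REGEX: standalone cluster
 *   "k8s://https://host:443"       // KUBERNETES_REGEX: Kubernetes cluster
 * }}}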
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
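 *
 * A sketch (assuming an active `sc: SparkContext` and a writable output path) of the kind of
 * call that picks these factories up implicitly:
 *
 * {{{
 *   val pairs = sc.parallelize(Seq(1 -> "a", 2 -> "b"))
 *   // resolves intWritableFactory and stringWritableFactory behind the scenes
 *   pairs.saveAsSequenceFile("/tmp/seqfile-demo")
 * }}}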
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} </s>
<s>[INST] <<SYS>> Below is an instruction that describes a task. Write a response that appropriately completes the request. <</SYS>> Write SparkContext class. [/INST] /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import 
org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. 
// Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. */ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. 
| * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. */ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). 
private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. * in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. 
| | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. 
val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. 
executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. _plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. 
_env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. 
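 *
 * A minimal illustrative sketch (internal usage, assuming an executor id obtained from the
 * status APIs):
 * {{{
 *   getExecutorThreadDump("1") match {
 *     case Some(frames) => frames.foreach(frame => logInfo(frame.toString))
 *     case None => // executor dead, unresponsive, or unreachable
 *   }
 * }}}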
*/ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. 
This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. 
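 *
 * A minimal illustrative sketch (internal usage, mirroring how the RDD factory methods below
 * wrap their bodies):
 * {{{
 *   def parallelize[T: ClassTag](seq: Seq[T], numSlices: Int): RDD[T] = withScope {
 *     new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]())
 *   }
 * }}}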
*/ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. * @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. 
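 *
 * For example (an illustrative sketch, assuming the hostnames exist in your cluster):
 * {{{
 *   val data = Seq(
 *     (1, Seq("host1.example.com")),
 *     (2, Seq("host2.example.com")))
 *   // one partition per element, preferring the given host for each partition
 *   val rdd = sparkContext.makeRDD(data)
 * }}}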
* @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... 
* (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
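 *
 * For example (an illustrative sketch using the old `org.apache.hadoop.mapred` API and a
 * hypothetical HDFS path):
 * {{{
 *   import org.apache.hadoop.io.{LongWritable, Text}
 *   import org.apache.hadoop.mapred.{FileInputFormat, JobConf, TextInputFormat}
 *
 *   val jobConf = new JobConf()
 *   FileInputFormat.setInputPaths(jobConf, "hdfs://namenode/path/to/data")
 *   val records = sparkContext.hadoopRDD(
 *     jobConf, classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
 * }}}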
*/ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
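 *
 * Copying out of the reused writables might look like this (illustrative, reusing `file` from
 * the example above):
 * {{{
 *   val lines = file.map { case (_, text) => text.toString }  // safe to cache or shuffle
 * }}}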
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newApiHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that user * don't need to pass them directly. Instead, callers can just write, for example: * ``` * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * ``` * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
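 *
 * For example (an illustrative sketch using the new `org.apache.hadoop.mapreduce` API and a
 * hypothetical HDFS path):
 * {{{
 *   import org.apache.hadoop.io.{LongWritable, Text}
 *   import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
 *
 *   val records = sparkContext.newAPIHadoopFile(
 *     "hdfs://namenode/path/to/data",
 *     classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
 * }}}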
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
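 *
 * For example (an illustrative sketch, assuming a SequenceFile of `Text` keys and `IntWritable`
 * values at a hypothetical path):
 * {{{
 *   import org.apache.hadoop.io.{IntWritable, Text}
 *
 *   val counts = sparkContext
 *     .sequenceFile("hdfs://namenode/path/to/seqfile", classOf[Text], classOf[IntWritable])
 *     .map { case (k, v) => (k.toString, v.get()) }  // copy out of the reused writables
 * }}}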
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. */ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. 
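 *
 * For example (illustrative):
 * {{{
 *   val acc = sc.longAccumulator("my counter")
 *   sc.parallelize(1 to 100).foreach(x => acc.add(x))
 *   acc.value  // 5050
 * }}}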
*/ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. 
To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. 
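// For example (illustrative): after a user calls sc.addFile("hdfs://namenode/data/lookup.txt"),
// a driver-side closure can resolve the locally fetched copy with
//   SparkFiles.get("lookup.txt")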
Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. */ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be concurrent launched based on the ResourceProfile * could be used, even if some of them are being used at the moment. * Note that please don't cache the value returned by this method, because the number can change * due to add/remove executors. * * @param rp ResourceProfile which to use to calculate max concurrent tasks. * @return The max number of tasks that can be concurrent launched currently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have a locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. 
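 *
 * A minimal illustrative sketch (targeting only the default resource profile):
 * {{{
 *   sc.requestTotalExecutors(
 *     numExecutors = 8,
 *     localityAwareTasks = 0,
 *     hostToLocalTaskCount = Map.empty)
 * }}}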
*/ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId-> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. 
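* * For example (illustrative only), assuming an active `sc: SparkContext`: * {{{ * sc.version // e.g. "3.5.0" * }}}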
*/ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. */ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
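* * For example, a minimal sketch (the jar paths below are hypothetical), assuming an active `sc: SparkContext`: * {{{ * sc.addJar("/path/to/my-udfs.jar") // jar local to the driver * sc.addJar("hdfs:///libs/shared-deps.jar") // jar on a Hadoop-supported filesystem * }}}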
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with an exit code that will be passed to the scheduler backend. * In client mode, the client side may call `SparkContext.stop()` to clean up but exit with * a code not equal to 0. This behavior causes resource schedulers such as the `ApplicationMaster` * to exit with a success status even though the client side exited with a failed status. Spark can * call this method to stop the SparkContext and pass the client side's correct exit code to the * scheduler backend. The scheduler backend should then send the exit code to the corresponding * resource scheduler to keep them consistent. * * @param exitCode Exit code that will be passed to the scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even after // this `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If none of these is set, return None.
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
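* * For example, an illustrative sketch (names are hypothetical), assuming an active `sc: SparkContext`: * {{{ * val rdd = sc.parallelize(1 to 100, numSlices = 4) * // Sum each partition asynchronously and report per-partition results as they arrive. * val futureAction = sc.submitJob( * rdd, * (it: Iterator[Int]) => it.sum, * rdd.partitions.indices, * (index: Int, partialSum: Int) => println(s"partition $index -> $partialSum"), * ()) * }}}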
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
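* * For illustration (the listener class name is hypothetical), extra listeners are typically supplied through configuration before the context starts, e.g.: * {{{ * new SparkConf().set("spark.extraListeners", "com.example.MyAppListener") * }}}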
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s" The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConf` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if you are just retrieving the existing context).
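* * For example (illustrative only): * {{{ * val sc = SparkContext.getOrCreate() * }}}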
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
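* * For example, an illustrative sketch (assumes the application classes are packaged in a jar; `"MyApp"` is a placeholder name): * {{{ * val conf = new SparkConf().setAppName("MyApp") * SparkContext.jarOfClass(this.getClass).foreach(jar => conf.setJars(Seq(jar))) * }}}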
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
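* * For illustration (not part of the original comment), master strings these regexes are meant to match include, e.g.: * {{{ * local local[4] local[*] local[4, 3] * local-cluster[2, 1, 1024] * spark://host:7077 * k8s://https://kubernetes.default.svc:443 * }}}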
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
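* * For example, an illustrative sketch (the output path is hypothetical): these implicit factories let a pair RDD of primitives be saved as a Hadoop SequenceFile without wrapping the values in `Writable`s by hand: * {{{ * sc.parallelize(Seq((1, "a"), (2, "b"))).saveAsSequenceFile("/tmp/pairs-seqfile") * }}}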
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
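 *
 * A brief usage sketch (editor's illustration, not part of the original scaladoc); it assumes an
 * active SparkContext named `sc`:
 * {{{
 *   val confSnapshot = sc.getConf                     // defensive copy; mutating it does not affect sc
 *   val master = confSnapshot.get("spark.master")     // e.g. "local[4]"
 * }}}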
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
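 *
 * Illustrative sketch (editor's addition, not original scaladoc); assumes an active
 * SparkContext named `sc`:
 * {{{
 *   sc.addJobTag("nightly-report")
 *   sc.parallelize(1 to 100).count()   // this job carries the "nightly-report" tag
 *   sc.removeJobTag("nightly-report")  // later jobs from this thread are no longer tagged
 * }}}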
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. 
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
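 *
 * A hedged usage sketch (editor's addition); `sc` is an active SparkContext and the input path
 * is a placeholder:
 * {{{
 *   import org.apache.hadoop.io.{LongWritable, Text}
 *   import org.apache.hadoop.mapred.TextInputFormat
 *   val lines = sc.hadoopFile("hdfs://nn/data/events", classOf[TextInputFormat],
 *     classOf[LongWritable], classOf[Text])
 *     .map { case (_, v) => v.toString }   // copy out of the reused Writable before caching
 * }}}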
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newApiHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that user * don't need to pass them directly. Instead, callers can just write, for example: * ``` * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * ``` * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast. 
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
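 *
 * Minimal sketch (editor's addition); assumes an active SparkContext `sc`:
 * {{{
 *   val a = sc.parallelize(1 to 3)
 *   val b = sc.parallelize(4 to 6)
 *   sc.union(Seq(a, b)).collect()   // Array(1, 2, 3, 4, 5, 6)
 * }}}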
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
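 *
 * Usage of the public `broadcast` wrapper above, as a hedged sketch (editor's addition);
 * assumes an active SparkContext `sc`:
 * {{{
 *   val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
 *   sc.parallelize(Seq("a", "b", "a")).map(k => lookup.value.getOrElse(k, 0)).collect()
 * }}}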
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
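 *
 * A hedged usage sketch (the HDFS path is illustrative):
 * {{{
 *   sc.addFile("hdfs:///data/lookup-tables", recursive = true)
 *   // On executors, resolve added files via SparkFiles.get(fileName)
 * }}}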
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
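 *
 * A minimal sketch of registering a listener (the listener body is illustrative):
 * {{{
 *   sc.addSparkListener(new SparkListener {
 *     override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
 *       println(s"Job ${jobEnd.jobId} finished")
 *   })
 * }}}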
 */ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be launched concurrently based on the resources the * given ResourceProfile could use, even if some of them are in use at the moment. * Note: do not cache the value returned by this method, because the number can change * as executors are added or removed. * * @param rp ResourceProfile to use to calculate max concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // This is applied to the default resource profile; an API would need to be added to // support other profiles. val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. 
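 *
 * A hedged sketch (the executor IDs are illustrative; real IDs can be taken from the Spark UI
 * or a `SparkListener`):
 * {{{
 *   sc.killExecutors(Seq("1", "2"))
 * }}}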
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
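 *
 * Illustrative check (the RDD name is hypothetical):
 * {{{
 *   rdd.cache()
 *   val tracked = sc.getPersistentRDDs.contains(rdd.id)
 * }}}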
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
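 *
 * A hedged usage sketch (the jar location is illustrative):
 * {{{
 *   sc.addJar("hdfs:///libs/my-udfs.jar")
 *   sc.listJars().foreach(println)
 * }}}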
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
 */ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with an exit code that will be passed to the scheduler backend. * In client mode, the client side may call `SparkContext.stop()` to clean up but exit with a * non-zero code. That causes a resource scheduler such as `ApplicationMaster` to exit with a * success status even though the client side exited with a failure status. Spark can call this * method to stop the SparkContext and pass the client side's exit code to the scheduler backend, * which should then forward it to the corresponding resource scheduler to keep the two * consistent. * * @param exitCode Exit code that will be passed to the scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even // after this `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If none of these is set, return None. 
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
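 *
 * A minimal sketch of counting rows on a couple of partitions asynchronously
 * (the RDD and partition indices are illustrative):
 * {{{
 *   val action = sc.submitJob(
 *     rdd,
 *     (it: Iterator[Int]) => it.size,
 *     Seq(0, 1),
 *     (index: Int, count: Int) => println(s"partition $index has $count rows"),
 *     ())
 * }}}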
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
 */ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s" The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConf` that will be used for initialization of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows the caller to omit the SparkConf (useful when just retrieving an existing * context). 
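 *
 * A hedged usage sketch:
 * {{{
 *   val sc = SparkContext.getOrCreate()
 * }}}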
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
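 *
 * For example, an application might locate its own jar like this (the `conf` value is
 * illustrative):
 * {{{
 *   SparkContext.jarOfClass(this.getClass).foreach(jar => conf.setJars(Seq(jar)))
 * }}}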
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
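 *
 * Illustrative master strings these patterns are meant to match (hosts and ports are
 * hypothetical):
 * {{{
 *   "local[4]"                  // LOCAL_N_REGEX
 *   "local[*, 3]"               // LOCAL_N_FAILURES_REGEX
 *   "local-cluster[2, 1, 1024]" // LOCAL_CLUSTER_REGEX
 *   "spark://host:7077"         // SPARK_REGEX
 *   "k8s://https://host:6443"   // KUBERNETES_REGEX
 * }}}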
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
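 *
 * Illustrative only (editor's sketch, not part of the original source): these factories are
 * what let an RDD of plain Scala types be written out as a SequenceFile, e.g.
 * {{{
 *   // the output path is hypothetical
 *   sc.parallelize(Seq(1 -> "a", 2 -> "b")).saveAsSequenceFile("hdfs://ns/out")
 * }}}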
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
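  /*
   * [Editor's note] A minimal construction sketch (illustrative, not part of the original
   * source); the app name and master below are placeholders:
   * {{{
   *   import org.apache.spark.{SparkConf, SparkContext}
   *
   *   val conf = new SparkConf().setAppName("word-count").setMaster("local[2]")
   *   val sc = new SparkContext(conf)
   *   try {
   *     println(sc.parallelize(1 to 100).sum())
   *   } finally {
   *     sc.stop()   // only one SparkContext may be active per JVM
   *   }
   * }}}
   */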
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
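    /*
     * [Editor's note] Illustrative sketch (the bucket name is hypothetical): a user enables
     * the S3A magic committer for a single bucket with
     * {{{
     *   --conf spark.hadoop.fs.s3a.bucket.my-bucket.committer.magic.enabled=true
     * }}}
     * and fillMissingMagicCommitterConfsIfNeeded() below then fills in the remaining
     * committer, factory and commit-protocol settings if they are not already set.
     */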
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
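    /*
     * [Editor's note] A minimal sketch (class and package names are hypothetical) of a plugin
     * that would be instantiated by the PluginContainer created below; such a plugin is
     * typically enabled with `--conf spark.plugins=com.example.NoopPlugin`:
     * {{{
     *   import org.apache.spark.api.plugin.{DriverPlugin, ExecutorPlugin, SparkPlugin}
     *
     *   class NoopPlugin extends SparkPlugin {
     *     override def driverPlugin(): DriverPlugin = null       // no driver-side component
     *     override def executorPlugin(): ExecutorPlugin = null   // no executor-side component
     *   }
     * }}}
     */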
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
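   *
   * Illustrative only (editor's sketch, mirroring the `setJobGroup` example above):
   * {{{
   *   // In the main thread:
   *   sc.addJobTag("long_running_query")
   *   sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count()
   *
   *   // In a separate thread:
   *   sc.cancelJobsWithTag("long_running_query")
   * }}}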
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. 
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
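   *
   * For example (editor's sketch; the path below is hypothetical):
   * {{{
   *   sc.hadoopFile("hdfs://ns/logs", classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
   *     .map { case (k, v) => (k.get, v.toString) }  // copy out of the reused Writable instances
   *     .cache()
   * }}}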
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newApiHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that user * don't need to pass them directly. Instead, callers can just write, for example: * ``` * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * ``` * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast. 
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
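* For example, a minimal sketch assuming a running context `sc`:
* {{{
*   val parts = Seq(sc.parallelize(1 to 3), sc.parallelize(4 to 6))
*   val all = sc.union(parts)   // single RDD containing 1..6
* }}}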
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
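* For example, typical usage of the public `broadcast` method (a sketch, assuming a running
* context `sc` and an `RDD[String]` named `rdd`):
* {{{
*   val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
*   rdd.map(key => lookup.value.getOrElse(key, 0))
* }}}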
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
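* For example (a sketch; the path and file name are placeholders):
* {{{
*   sc.addFile("hdfs:///data/lookup.txt")
*   // later, inside a task or on the driver:
*   val localPath = org.apache.spark.SparkFiles.get("lookup.txt")
* }}}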
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
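* For example, a minimal sketch of a listener that logs application end (assuming
* `org.apache.spark.scheduler._` is imported):
* {{{
*   sc.addSparkListener(new SparkListener {
*     override def onApplicationEnd(end: SparkListenerApplicationEnd): Unit =
*       println(s"Application ended at ${end.time}")
*   })
* }}}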
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be launched concurrently based on the given * ResourceProfile, counting every slot that could be used, even if some of them are in use at * the moment. * Note: don't cache the value returned by this method, because the number can change as * executors are added or removed. * * @param rp the ResourceProfile to use to calculate the max concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received.
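* For example (a sketch; executor IDs can be obtained from the Spark UI or a `SparkListener`):
* {{{
*   sc.killExecutors(Seq("1", "2"))
* }}}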
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
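* For example (a sketch assuming a running context `sc` and an RDD `rdd`):
* {{{
*   rdd.cache()
*   sc.getPersistentRDDs.contains(rdd.id)   // true once the RDD is marked persistent
* }}}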
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
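* For example (a sketch; the jar path is a placeholder):
* {{{
*   sc.addJar("hdfs:///libs/my-udfs.jar")
*   sc.listJars().foreach(println)
* }}}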
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with exit code that will passed to scheduler backend. * In client mode, client side may call `SparkContext.stop()` to clean up but exit with * code not equal to 0. This behavior cause resource scheduler such as `ApplicationMaster` * exit with success status but client side exited with failed status. Spark can call * this method to stop SparkContext and pass client side correct exit code to scheduler backend. * Then scheduler backend should send the exit code to corresponding resource scheduler * to keep consistent. * * @param exitCode Specified exit code that will passed to scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If neither of these is set, return None. 
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
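* For example, a sketch that sums every partition asynchronously (assuming a running context
* `sc` and an `RDD[Int]` named `rdd`):
* {{{
*   val sums = scala.collection.mutable.ArrayBuffer[Int]()
*   val future = sc.submitJob(
*     rdd,
*     (it: Iterator[Int]) => it.sum,
*     0 until rdd.getNumPartitions,
*     (_: Int, s: Int) => sums += s,
*     sums.sum)
* }}}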
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
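* For example, a listener can also be registered declaratively through configuration (a sketch;
* `com.example.MyListener` is a placeholder class name):
* {{{
*   spark-submit --conf spark.extraListeners=com.example.MyListener ...
* }}}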
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConfig` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). 
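* For example:
* {{{
*   val sc = SparkContext.getOrCreate()
* }}}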
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
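* For example (a sketch; `MyJob` is a placeholder for a user class packaged in a jar):
* {{{
*   SparkContext.jarOfClass(classOf[MyJob]).foreach(sc.addJar)
* }}}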
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
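 *
 * Master URL formats recognized here include, for example (illustrative host and port values):
 * {{{
 *   local[4]                     // local mode with 4 threads
 *   local[4, 2]                  // local mode with 4 threads and up to 2 task failures
 *   local-cluster[2, 1, 1024]    // local cluster: 2 workers, 1 core and 1024 MiB each
 *   spark://host:7077            // standalone cluster
 *   k8s://https://host:443       // Kubernetes cluster
 * }}}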
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
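  // For reference, these converters are what make calls like the following compile (a hedged
  // usage sketch; `sc` is assumed to be an active SparkContext and the path is hypothetical):
  //
  //   val counts: RDD[(String, Int)] = sc.sequenceFile[String, Int]("hdfs://path/to/counts")
  //
  // The String/Int type parameters are resolved to Text/IntWritable through the implicit
  // converter functions declared below.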
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
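 *
 * For example (a hedged sketch; `rdd` is assumed to be an `RDD[(String, Int)]`), these
 * factories are what allow:
 * {{{
 *   rdd.saveAsSequenceFile("hdfs://path/to/output")
 * }}}
 * converting the `String` keys to `Text` and the `Int` values to `IntWritable` on write.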
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
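 *
 * For example (a hedged sketch), mutating the returned copy does not affect the running
 * context:
 * {{{
 *   val confCopy = sc.getConf
 *   confCopy.set("spark.app.name", "renamed")  // only changes the local copy
 * }}}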
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
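 *
 * For example (a hedged sketch), a tag added here can later be used to cancel every job it
 * was attached to:
 * {{{
 *   sc.addJobTag("nightly-etl")
 *   // ... trigger some actions ...
 *   sc.cancelJobsWithTag("nightly-etl")
 * }}}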
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. 
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
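 *
 * For example (a hedged sketch; `path` is an assumed input location), materializing the
 * values as plain Strings before caching avoids the shared-Writable issue described above:
 * {{{
 *   sc.hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
 *     .map { case (_, text) => text.toString }
 *     .cache()
 * }}}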
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newAPIHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that users * don't need to pass them directly. Instead, callers can just write, for example: * {{{ * val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop dataset with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
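 * For example, assuming an active `SparkContext` named `sc`:
 * {{{
 *   val a = sc.parallelize(1 to 3)
 *   val b = sc.parallelize(4 to 6)
 *   // u contains 1 through 6; a partitioner-aware union is used when all inputs
 *   // share the same partitioner.
 *   val u = sc.union(Seq(a, b))
 * }}}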
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or an exception will be thrown. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with the given name. * * @note Accumulators must be registered before use, or an exception will be thrown. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with an empty list and accumulates * inputs by adding them to the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with an empty list and accumulates * inputs by adding them to the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once.
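 * For example, the public `broadcast` method above can be used as follows, assuming an active
 * `SparkContext` named `sc`:
 * {{{
 *   val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
 *   val resolved = sc.parallelize(Seq("a", "b", "a")).map(k => lookup.value.getOrElse(k, 0))
 * }}}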
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
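 * For example, assuming an active `SparkContext` named `sc` and a placeholder HDFS directory:
 * {{{
 *   sc.addFile("hdfs://a-hdfs-path/lookup-tables", recursive = true)
 *   // On the driver or executors, resolve the downloaded location via SparkFiles.
 *   val localDir = org.apache.spark.SparkFiles.get("lookup-tables")
 * }}}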
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
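 * For example, assuming an active `SparkContext` named `sc`, a listener that logs job completions
 * might be registered like this:
 * {{{
 *   import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}
 *
 *   sc.addSparkListener(new SparkListener {
 *     override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
 *       println(s"Job ${jobEnd.jobId} finished with result ${jobEnd.jobResult}")
 *   })
 * }}}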
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be launched concurrently based on the given * ResourceProfile, even if some of the slots are being used at the moment. * Note: do not cache the value returned by this method, because the number can change * as executors are added or removed. * * @param rp ResourceProfile to use to calculate the max concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received.
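 * For example, assuming an active `SparkContext` named `sc` running without dynamic allocation,
 * two specific executors might be released like this (the executor IDs are placeholders):
 * {{{
 *   val acknowledged = sc.killExecutors(Seq("1", "2"))
 * }}}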
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
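 * For example, assuming an active `SparkContext` named `sc`:
 * {{{
 *   val cached = sc.parallelize(1 to 10).setName("demo").cache()
 *   cached.count()
 *   sc.getPersistentRDDs.foreach { case (id, rdd) => println(s"RDD $id: ${rdd.name}") }
 * }}}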
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
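 * For example, assuming an active `SparkContext` named `sc` and a placeholder jar path:
 * {{{
 *   sc.addJar("hdfs://a-hdfs-path/libs/extra-udfs.jar")
 *   sc.listJars().foreach(println)
 * }}}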
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with an exit code that will be passed to the scheduler backend. * In client mode, the client side may call `SparkContext.stop()` to clean up but exit with * a non-zero code. This behavior causes a resource scheduler such as `ApplicationMaster` * to exit with success status even though the client side exited with failed status. Spark can call * this method to stop the SparkContext and pass the client side's exit code to the scheduler backend. * The scheduler backend should then send the exit code to the corresponding resource scheduler * to keep them consistent. * * @param exitCode Exit code that will be passed to the scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even after this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If none of these is set, return None.
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
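 * For example, assuming an active `SparkContext` named `sc`, a non-blocking count over the first
 * two partitions might look like this:
 * {{{
 *   import java.util.concurrent.atomic.AtomicLong
 *   import scala.concurrent.ExecutionContext.Implicits.global
 *
 *   val data = sc.parallelize(1 to 100, 4)
 *   val counted = new AtomicLong(0)
 *   val future = sc.submitJob[Int, Long, Long](
 *     data,
 *     iter => iter.size.toLong,
 *     Seq(0, 1),
 *     (_, partitionCount) => counted.addAndGet(partitionCount),
 *     counted.get())
 *   future.foreach(total => println(s"Counted $total elements"))
 * }}}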
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
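 * For example, a listener can also be wired in through configuration before the context is
 * created; the class name below is a placeholder for a user-provided `SparkListenerInterface`
 * implementation:
 * {{{
 *   val conf = new SparkConf()
 *     .setMaster("local[2]")
 *     .setAppName("listener-demo")
 *     .set("spark.extraListeners", "com.example.MyListener")
 *   val sc = new SparkContext(conf)
 * }}}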
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s" The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConf` that will be used for initialization of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving).
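 * For example, assuming the master and app name are already supplied through the configuration
 * (or that an active context already exists):
 * {{{
 *   val sc = SparkContext.getOrCreate()
 *   val same = SparkContext.getOrCreate()   // returns the same instance
 *   assert(sc eq same)
 * }}}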
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
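 *
 * A minimal usage sketch (the class literal and jar name are only examples):
 * {{{
 *   // Yields e.g. Some("/path/to/spark-core.jar") when the class was loaded from a jar,
 *   // or None when it was loaded from a directory on the classpath.
 *   SparkContext.jarOfClass(classOf[org.apache.spark.SparkContext])
 * }}}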
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
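 *
 * For reference, example master strings and the patterns that match them
 * (hosts and numbers are illustrative):
 * {{{
 *   local[4], local[*]          -> LOCAL_N_REGEX
 *   local[4, 2]                 -> LOCAL_N_FAILURES_REGEX  (2 = maxRetries)
 *   local-cluster[2, 1, 1024]   -> LOCAL_CLUSTER_REGEX     (workers, cores, MiB per worker)
 *   spark://host:7077           -> SPARK_REGEX
 *   k8s://https://host:443      -> KUBERNETES_REGEX
 * }}}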
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
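  // For context, these converters are resolved implicitly by SparkContext.sequenceFile;
  // a hedged sketch (the path is a placeholder):
  //   val rdd = sc.sequenceFile[Int, String]("/data/example-seqfile")
  // which relies on intWritableConverterFn and stringWritableConverterFn defined below.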
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
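 *
 * These factories are resolved implicitly when writing sequence files; a hedged sketch
 * (the output path is a placeholder):
 * {{{
 *   sc.parallelize(Seq((1, "a"), (2, "b"))).saveAsSequenceFile("/tmp/example-seqfile")
 * }}}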
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
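 *
 * A small usage sketch (the tag value is a placeholder; cancellation via
 * `cancelJobsWithTag` is shown for illustration):
 * {{{
 *   // In the thread submitting work:
 *   sc.addJobTag("nightly-etl")
 *   sc.parallelize(1 to 1000000).count()
 *
 *   // From another thread, cancel every job carrying that tag:
 *   sc.cancelJobsWithTag("nightly-etl")
 * }}}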
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. 
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
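 *
 * A minimal sketch of calling this variant directly, alongside the `textFile` wrapper that is
 * built on top of it (assuming an active `SparkContext` named `sc`; the path is illustrative):
 * {{{
 *   val viaWrapper = sc.textFile("hdfs://some-path/logs")
 *   val viaHadoopFile = sc
 *     .hadoopFile("hdfs://some-path/logs", classOf[TextInputFormat],
 *       classOf[LongWritable], classOf[Text])
 *     .map { case (_, line) => line.toString }  // copy the value out of the reused Writable
 * }}}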
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newApiHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that user * don't need to pass them directly. Instead, callers can just write, for example: * ``` * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * ``` * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast. 
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
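 *
 * A minimal sketch (assuming an active `SparkContext` named `sc`):
 * {{{
 *   val a = sc.parallelize(1 to 3)
 *   val b = sc.parallelize(4 to 6)
 *   val merged = sc.union(Seq(a, b))  // RDD containing the elements 1 to 6
 * }}}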
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
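 *
 * For reference, a minimal sketch of the public `broadcast` API that this backs, together with
 * an accumulator (assuming an active `SparkContext` named `sc`):
 * {{{
 *   val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
 *   val misses = sc.longAccumulator("misses")
 *   sc.parallelize(Seq("a", "b", "c")).foreach { k =>
 *     if (!lookup.value.contains(k)) misses.add(1)
 *   }
 *   // after the action completes, misses.value == 1 on the driver
 * }}}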
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
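 *
 * A minimal sketch (assuming an active `SparkContext` named `sc`; the file path is
 * illustrative):
 * {{{
 *   sc.addFile("/tmp/lookup.txt")
 *   sc.parallelize(1 to 4).foreach { _ =>
 *     val local = org.apache.spark.SparkFiles.get("lookup.txt")  // download location on executor
 *     // read the locally fetched copy here
 *   }
 * }}}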
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
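 *
 * A minimal sketch (assuming an active `SparkContext` named `sc`):
 * {{{
 *   sc.addSparkListener(new SparkListener {
 *     override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
 *       println(s"Job ${jobEnd.jobId} finished: ${jobEnd.jobResult}")
 *   })
 * }}}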
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be concurrent launched based on the ResourceProfile * could be used, even if some of them are being used at the moment. * Note that please don't cache the value returned by this method, because the number can change * due to add/remove executors. * * @param rp ResourceProfile which to use to calculate max concurrent tasks. * @return The max number of tasks that can be concurrent launched currently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have a locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId-> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. 
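 *
 * A minimal sketch (assuming an active `SparkContext` named `sc` on a cluster manager that
 * supports executor allocation; the executor IDs are illustrative):
 * {{{
 *   val acknowledged = sc.killExecutors(Seq("1", "2"))
 *   // when acknowledged is true, the application's executor target is reduced accordingly
 * }}}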
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
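 *
 * A minimal sketch (assuming an active `SparkContext` named `sc`):
 * {{{
 *   val cached = sc.parallelize(1 to 10).cache()
 *   cached.count()                             // materializes the cached partitions
 *   sc.getPersistentRDDs.contains(cached.id)   // true
 * }}}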
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
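 *
 * A minimal sketch (assuming an active `SparkContext` named `sc`; the local path and the Ivy
 * coordinate are illustrative):
 * {{{
 *   sc.addJar("/opt/jobs/extra-udfs.jar")             // jar on the driver, served to executors
 *   sc.addJar("ivy://org.example:extra-udfs:1.0.0")   // resolved through Maven/Ivy coordinates
 *   sc.listJars().foreach(println)
 * }}}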
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with exit code that will passed to scheduler backend. * In client mode, client side may call `SparkContext.stop()` to clean up but exit with * code not equal to 0. This behavior cause resource scheduler such as `ApplicationMaster` * exit with success status but client side exited with failed status. Spark can call * this method to stop SparkContext and pass client side correct exit code to scheduler backend. * Then scheduler backend should send the exit code to corresponding resource scheduler * to keep consistent. * * @param exitCode Specified exit code that will passed to scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If neither of these is set, return None. 
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
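 *
 * A minimal sketch (assuming an active `SparkContext` named `sc`):
 * {{{
 *   val rdd = sc.parallelize(1 to 100, 4)
 *   val future = sc.submitJob(
 *     rdd,
 *     (it: Iterator[Int]) => it.sum,
 *     Seq(0, 1),                                           // only the first two partitions
 *     (index: Int, partSum: Int) => println(s"partition $index: $partSum"),
 *     ())                                                  // resultFunc: nothing extra to return
 *   // the blocking equivalent: sc.runJob(rdd, (it: Iterator[Int]) => it.sum, Seq(0, 1))
 * }}}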
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
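 *
 * For reference, a minimal sketch of wiring a listener in through configuration rather than
 * `addSparkListener` (the class name is illustrative):
 * {{{
 *   val conf = new SparkConf()
 *     .setAppName("app")
 *     .set("spark.extraListeners", "com.example.MyListener")
 *   // each named class is instantiated via a one-argument (SparkConf) or zero-argument
 *   // constructor and registered on the shared listener queue
 * }}}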
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConfig` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). 
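 *
 * A minimal sketch of typical usage:
 * {{{
 *   // Reuse the active context if one exists, otherwise build one from system properties:
 *   val sc = SparkContext.getOrCreate()
 *
 *   // Or seed a new context with an explicit configuration (if a context is already
 *   // active, the supplied settings may not take effect):
 *   val sc2 = SparkContext.getOrCreate(new SparkConf().setMaster("local[2]").setAppName("demo"))
 * }}}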
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
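 *
 * For example (a sketch; `MyJob` stands for any class packaged in the application JAR):
 * {{{
 *   val conf = new SparkConf().setJars(SparkContext.jarOfClass(classOf[MyJob]).toSeq)
 * }}}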
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
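 *
 * A few master strings these patterns are intended to match (values are illustrative):
 * {{{
 *   local                       // one worker thread
 *   local[4]                    // four worker threads
 *   local[*]                    // as many threads as logical cores
 *   local[4, 3]                 // four threads, up to 3 task failures
 *   local-cluster[2, 1, 1024]   // 2 workers with 1 core and 1024 MiB each
 *   spark://host:7077           // standalone cluster
 *   k8s://https://host:443      // Kubernetes cluster
 * }}}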
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
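  // For example (sketch): with the implicits below in scope, a read such as
  //   sc.sequenceFile[Int, String]("hdfs://host/path")
  // resolves WritableConverter[Int] and WritableConverter[String] automatically, so the
  // caller never has to mention IntWritable or Text.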
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
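 *
 * For example (a sketch; the path is illustrative), the implicit factories defined in the
 * companion object below are what allow
 * {{{
 *   sc.parallelize(Seq(1 -> "a", 2 -> "b")).saveAsSequenceFile("hdfs://host/out")
 * }}}
 * to convert the `Int` keys and `String` values to `IntWritable` and `Text` without the
 * caller referencing any Hadoop types.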
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
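 *
 * For example (a sketch using the public multi-argument constructor documented above;
 * URLs and paths are illustrative):
 * {{{
 *   val sc = new SparkContext("spark://master:7077", "My App", "/opt/spark",
 *     Seq("/path/to/app.jar"))
 * }}}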
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
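 *
 * For example (sketch), reading a setting back from the copy:
 * {{{
 *   val appName = sc.getConf.get("spark.app.name")
 * }}}
 * Mutating the returned copy has no effect on the running context.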
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
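 *
 * For illustration, a minimal sketch assuming an existing `sc: SparkContext` and an
 * arbitrary tag name:
 * {{{
 *   // In the thread submitting work, tag everything started afterwards:
 *   sc.addJobTag("nightly-etl")
 *   sc.parallelize(1 to 1000, 4).map(_ * 2).count()
 *   sc.removeJobTag("nightly-etl")
 *
 *   // From any other thread, cancel all running jobs carrying the tag:
 *   sc.cancelJobsWithTag("nightly-etl")
 * }}}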
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; large files are also allowed, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input.
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
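 *
 * As an illustration of the note above, a minimal sketch assuming an existing
 * `sc: SparkContext` and a readable (hypothetical) input path:
 * {{{
 *   import org.apache.hadoop.io.{LongWritable, Text}
 *   import org.apache.hadoop.mapred.TextInputFormat
 *
 *   val raw = sc.hadoopFile("hdfs:///data/logs", classOf[TextInputFormat],
 *     classOf[LongWritable], classOf[Text])
 *   // Copy the re-used Text values into plain Strings before caching or sorting.
 *   val lines = raw.map { case (_, text) => text.toString }.cache()
 * }}}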
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newAPIHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that users * don't need to pass them directly. Instead, callers can just write, for example: * {{{ * val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
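 *
 * For illustration, a minimal sketch assuming an existing `sc: SparkContext`:
 * {{{
 *   val a = sc.parallelize(1 to 3)
 *   val b = sc.parallelize(4 to 6)
 *   // Yields a single RDD containing the elements 1, 2, 3, 4, 5, 6.
 *   val all = sc.union(Seq(a, b))
 * }}}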
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
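 *
 * For illustration, typical use of the public `broadcast` API looks like the following
 * (a minimal sketch assuming an existing `sc: SparkContext`):
 * {{{
 *   val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
 *   val counts = sc.parallelize(Seq("a", "b", "c"))
 *     .map(key => lookup.value.getOrElse(key, 0))
 *     .collect()
 *   lookup.unpersist()   // optionally release the cached copies on the executors
 * }}}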
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
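 *
 * For illustration, a minimal sketch assuming an existing `sc: SparkContext` and a
 * readable local file at the given (hypothetical) path:
 * {{{
 *   sc.addFile("/tmp/lookup.csv")
 *   sc.parallelize(1 to 4, 2).foreach { _ =>
 *     // Resolve the locally downloaded copy on each executor.
 *     val localPath = org.apache.spark.SparkFiles.get("lookup.csv")
 *     println(localPath)
 *   }
 * }}}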
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
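 *
 * For illustration, a minimal sketch assuming an existing `sc: SparkContext`:
 * {{{
 *   import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}
 *
 *   sc.addSparkListener(new SparkListener {
 *     override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
 *       println(s"Job ${jobEnd.jobId} finished with result ${jobEnd.jobResult}")
 *   })
 * }}}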
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be launched concurrently based on the given * ResourceProfile, even if some of the resources are being used at the moment. * Note: please don't cache the value returned by this method, because the number can change * due to adding/removing executors. * * @param rp the ResourceProfile to use to calculate max concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received.
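 *
 * For illustration, a minimal sketch (executor IDs are the strings shown in the Spark UI,
 * e.g. obtained via a listener; this assumes dynamic allocation is disabled):
 * {{{
 *   val acknowledged = sc.killExecutors(Seq("1", "2"))
 * }}}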
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
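 *
 * For illustration, a minimal sketch assuming an existing `sc: SparkContext`:
 * {{{
 *   val cached = sc.parallelize(1 to 10).setName("numbers").cache()
 *   cached.count()
 *   sc.getPersistentRDDs.foreach { case (id, rdd) =>
 *     println(s"RDD $id (${rdd.name}) is marked persistent")
 *   }
 * }}}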
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
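 *
 * For illustration, a minimal sketch assuming an existing `sc: SparkContext` and jars
 * at the given (hypothetical) locations:
 * {{{
 *   sc.addJar("/opt/libs/my-udfs.jar")       // a file local to the driver
 *   sc.addJar("hdfs:///libs/shared.jar")     // fetched by executors from HDFS
 *   println(sc.listJars().mkString(", "))
 * }}}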
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
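 *
 * For illustration, application code outside Spark's own components should stop the
 * context directly; a minimal sketch:
 * {{{
 *   val sc = new SparkContext(new SparkConf().setAppName("example").setMaster("local[2]"))
 *   try {
 *     sc.parallelize(1 to 10).count()
 *   } finally {
 *     sc.stop()
 *   }
 * }}}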
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with exit code that will passed to scheduler backend. * In client mode, client side may call `SparkContext.stop()` to clean up but exit with * code not equal to 0. This behavior cause resource scheduler such as `ApplicationMaster` * exit with success status but client side exited with failed status. Spark can call * this method to stop SparkContext and pass client side correct exit code to scheduler backend. * Then scheduler backend should send the exit code to corresponding resource scheduler * to keep consistent. * * @param exitCode Specified exit code that will passed to scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If neither of these is set, return None. 
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
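 *
 * A hedged usage sketch, assuming `rdd` is an `RDD[Int]`; the function bodies, partition ids and
 * handler below are illustrative only and not taken from the original source:
 * {{{
 *   val future = sc.submitJob(
 *     rdd,
 *     (it: Iterator[Int]) => it.sum,
 *     Seq(0, 1),
 *     (index: Int, partialSum: Int) => println(s"partition $index -> $partialSum"),
 *     ())
 * }}}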
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
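 *
 * As a hedged illustration (the listener class name below is hypothetical), the listeners
 * registered here are the ones supplied through user configuration such as:
 * {{{
 *   val conf = new SparkConf().set("spark.extraListeners", "com.example.MyAppListener")
 * }}}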
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
 */ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s" The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConf` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving).
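 *
 * For example (a minimal sketch):
 * {{{
 *   val sc = SparkContext.getOrCreate()
 * }}}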
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
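 *
 * A minimal sketch (the class used here is only illustrative):
 * {{{
 *   val sparkJar: Option[String] = SparkContext.jarOfClass(classOf[SparkContext])
 * }}}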
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
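 *
 * For example, master strings of the forms `local[4]`, `local[4, 2]`, `local-cluster[2, 1, 1024]`,
 * `spark://host:7077` and `k8s://https://host:443` are recognized by the patterns below.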
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
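  // A hedged illustration of where these converters are resolved implicitly (the path below is
  // only an example): reading a SequenceFile of primitive key/value types picks up the matching
  // converters, e.g. `sc.sequenceFile[Int, String]("hdfs://path/to/data")`.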
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
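 *
 * A hedged sketch of where these factories are used implicitly (the output path is illustrative):
 * {{{
 *   sc.parallelize(Seq(1 -> "a", 2 -> "b")).saveAsSequenceFile("hdfs://path/out")
 * }}}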
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
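 *
 * For example (a minimal sketch):
 * {{{
 *   val appName = sc.getConf.get("spark.app.name")
 * }}}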
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert Java options to env vars as a workaround // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for the OpenBLAS routine to the number of cores assigned // to this executor because some Spark ML algorithms call OpenBLAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this))
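// Illustrative sketch (not part of SparkContext itself): how an application could pin the
// OpenMP thread pool explicitly instead of relying on the spark.task.cpus-derived default
// set above. The two config keys are the ones referenced in the surrounding code; the
// values are made-up examples.
//
//   val conf = new SparkConf()
//     .setAppName("omp-example")
//     .set("spark.task.cpus", "2")                    // cores reserved per task
//     .set("spark.executorEnv.OMP_NUM_THREADS", "2")  // explicit value takes precedence
//   val sc = new SparkContext(conf)
// Initialize any plugins before the task scheduler is initialized.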
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. 
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newAPIHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that users * don't need to pass them directly. Instead, callers can just write, for example: * {{{ * val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) }
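// Illustrative sketch (not part of SparkContext itself): calling the explicit-class variant of
// newAPIHadoopFile declared above; `sc` is an assumed, already-created SparkContext and the
// path is a made-up example.
//
//   import org.apache.hadoop.io.{LongWritable, Text}
//   import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
//   val lines = sc.newAPIHadoopFile("hdfs://some-path/data.txt", classOf[TextInputFormat],
//     classOf[LongWritable], classOf[Text]).map(_._2.toString)  // copy out of the reused Writable
/** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.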
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be launched concurrently based on the resources the * given ResourceProfile could use, even if some of them are being used at the moment. * Note: don't cache the value returned by this method, because the number can change when * executors are added or removed. * * @param rp The ResourceProfile to use to calculate the max concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } }
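// Illustrative sketch (not part of SparkContext itself): how a driver program might exercise
// the developer APIs above; `sc` is an assumed SparkContext and the executor id is made up.
//
//   sc.requestExecutors(2)                       // ask the cluster manager for two more executors
//   sc.requestTotalExecutors(numExecutors = 10,
//     localityAwareTasks = 0,
//     hostToLocalTaskCount = Map.empty)          // declare the desired total instead
//   sc.killExecutors(Seq("3"))                   // hand one back (only with dynamic allocation off)
/** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received.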
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
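 *
 * Illustrative sketch (hypothetical error handler, not from the original docs): a Spark-internal
 * component running on one of those internal threads stops the context asynchronously rather
 * than calling `stop()` and waiting on itself:
 *
 * {{{
 *   def onFatalError(sc: SparkContext, e: Throwable): Unit = {
 *     // log the failure, then shut down without blocking the current thread
 *     sc.stopInNewThread()
 *   }
 * }}}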
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with exit code that will passed to scheduler backend. * In client mode, client side may call `SparkContext.stop()` to clean up but exit with * code not equal to 0. This behavior cause resource scheduler such as `ApplicationMaster` * exit with success status but client side exited with failed status. Spark can call * this method to stop SparkContext and pass client side correct exit code to scheduler backend. * Then scheduler backend should send the exit code to corresponding resource scheduler * to keep consistent. * * @param exitCode Specified exit code that will passed to scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If neither of these is set, return None. 
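 *
 * Resolution-order sketch (illustrative values only):
 *
 * {{{
 *   // A value set via conf.set("spark.home", "/opt/spark") wins over the SPARK_HOME
 *   // environment variable; if neither is present, None is returned.
 *   val sparkHome: Option[String] = sc.getSparkHome()
 * }}}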
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
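 *
 * Illustrative sketch (hypothetical RDD and handlers, not from the original docs):
 *
 * {{{
 *   val rdd = sc.parallelize(1 to 100, 4)
 *   val future = sc.submitJob(
 *     rdd,
 *     (it: Iterator[Int]) => it.sum,                                 // processPartition
 *     Seq(0, 1),                                                     // only the first two partitions
 *     (index: Int, partial: Int) => println(s"$index -> $partial"),  // resultHandler
 *     ())                                                            // resultFunc
 *   // The returned SimpleFutureAction can be cancelled or awaited later.
 * }}}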
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
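 *
 * Illustrative configuration sketch (the listener class name is hypothetical); classes listed
 * here need either a zero-arg constructor or one that accepts a `SparkConf`:
 *
 * {{{
 *   val conf = new SparkConf()
 *     .set("spark.extraListeners", "com.example.AuditSparkListener")
 * }}}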
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConfig` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). 
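 *
 * Illustrative sketch:
 *
 * {{{
 *   // Returns the already-running context if one exists; otherwise creates one from
 *   // system properties (e.g. those set by spark-submit).
 *   val sc = SparkContext.getOrCreate()
 * }}}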
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
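 *
 * Illustrative sketch (`MyJob` is a hypothetical application class):
 *
 * {{{
 *   val conf = new SparkConf()
 *     .setAppName("my-job")
 *     .setJars(SparkContext.jarOfClass(classOf[MyJob]).toSeq)
 * }}}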
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
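 *
 * Master strings these patterns are meant to match (illustrative examples):
 *
 * {{{
 *   local[4]                     // LOCAL_N_REGEX: 4 threads
 *   local[*, 2]                  // LOCAL_N_FAILURES_REGEX: all cores, 2 task failures allowed
 *   local-cluster[2, 1, 1024]    // LOCAL_CLUSTER_REGEX: 2 workers, 1 core and 1024 MiB each
 *   spark://host:7077            // SPARK_REGEX: standalone cluster
 *   k8s://https://host:6443      // KUBERNETES_REGEX: Kubernetes cluster
 * }}}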
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
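  // Illustrative note (not from the original source): with these converters in implicit scope,
  // a call such as sc.sequenceFile[Int, String](path) can turn IntWritable/Text records back
  // into plain Int/String pairs without the caller naming any Writable types.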
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
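 *
 * Illustrative sketch (the output path is a placeholder): the implicit factories below are what
 * let a pair RDD of plain Scala types be written as a Hadoop SequenceFile:
 *
 * {{{
 *   sc.parallelize(Seq(1 -> "a", 2 -> "b"))
 *     .saveAsSequenceFile("/tmp/pairs")  // uses WritableFactory[Int] and WritableFactory[String]
 * }}}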
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
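 *
 * Illustrative sketch: the returned copy can be read freely, but mutating it has no effect on
 * the running context:
 *
 * {{{
 *   val conf = sc.getConf
 *   val appName = conf.get("spark.app.name")
 *   conf.set("spark.executor.memory", "4g")  // does not change this SparkContext
 * }}}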
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert Java options to env vars as a workaround // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some Spark ML algorithms call OpenBLAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
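 *
 * A brief usage sketch (illustrative; the tag name is hypothetical and `sc` is an existing
 * SparkContext):
 * {{{
 *   sc.addJobTag("nightly-etl")
 *   try {
 *     sc.parallelize(1 to 100).count()   // this job carries the "nightly-etl" tag
 *   } finally {
 *     sc.removeJobTag("nightly-etl")
 *   }
 * }}}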
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file and the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; large files are also allowed, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. 
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
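 *
 * A usage sketch (illustrative; the input path is hypothetical and the Hadoop classes come from
 * `org.apache.hadoop.io` and `org.apache.hadoop.mapred`):
 * {{{
 *   import org.apache.hadoop.io.{LongWritable, Text}
 *   import org.apache.hadoop.mapred.TextInputFormat
 *   val lines = sc.hadoopFile("/data/events", classOf[TextInputFormat],
 *     classOf[LongWritable], classOf[Text])
 *     .map(pair => pair._2.toString)   // copy the reused Writable out as a String
 * }}}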
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newAPIHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that users * don't need to pass them directly. Instead, callers can just write, for example: * {{{ * val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast. 
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
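 *
 * A minimal sketch (illustrative; assumes an existing `sc: SparkContext`):
 * {{{
 *   val a = sc.parallelize(Seq(1, 2, 3))
 *   val b = sc.parallelize(Seq(4, 5, 6))
 *   sc.union(Seq(a, b)).collect()   // Array(1, 2, 3, 4, 5, 6)
 * }}}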
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
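 *
 * For reference, typical usage of the public `broadcast` wrapper looks like this
 * (an illustrative sketch; the lookup table is hypothetical):
 * {{{
 *   val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
 *   sc.parallelize(Seq("a", "b")).map(k => lookup.value(k)).collect()   // Array(1, 2)
 * }}}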
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
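 *
 * A short sketch (illustrative; the file path is hypothetical):
 * {{{
 *   sc.addFile("hdfs://nn:8020/config/lookup.txt")
 *   sc.parallelize(1 to 4).map { _ =>
 *     // Each task resolves its local copy through SparkFiles.
 *     org.apache.spark.SparkFiles.get("lookup.txt")
 *   }.collect()
 * }}}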
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
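 *
 * A minimal sketch (illustrative; assumes an existing `sc: SparkContext` and that the listener
 * classes from `org.apache.spark.scheduler` are in scope):
 * {{{
 *   sc.addSparkListener(new SparkListener {
 *     override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
 *       println(s"Job ${jobEnd.jobId} finished with result ${jobEnd.jobResult}")
 *   })
 * }}}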
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be launched concurrently based on the given * ResourceProfile, counting resources that could be used even if some of them are in use at * the moment. * Note: do not cache the value returned by this method, because the number can change * as executors are added or removed. * * @param rp the ResourceProfile to use when calculating max concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // This is applied to the default resource profile; an API addition would be needed to // support other profiles. val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received.
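 *
 * A minimal usage sketch (assumes a running SparkContext `sc` on a backend that implements
 * `ExecutorAllocationClient`, with dynamic allocation turned off; the executor IDs are
 * illustrative):
 * {{{
 *   // Ask the cluster manager to remove executors "1" and "2" and lower the executor target.
 *   val acknowledged: Boolean = sc.killExecutors(Seq("1", "2"))
 * }}}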
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
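 *
 * A minimal usage sketch (assumes a running SparkContext `sc`):
 * {{{
 *   val nums = sc.parallelize(1 to 100).cache()
 *   nums.count()   // materializes the cached blocks
 *   sc.getPersistentRDDs.foreach { case (id, rdd) =>
 *     println(s"RDD $id is marked persistent at level ${rdd.getStorageLevel}")
 *   }
 * }}}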
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
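 *
 * A minimal usage sketch (assumes a running SparkContext `sc`; the path and coordinates are
 * illustrative):
 * {{{
 *   sc.addJar("/opt/jobs/udfs.jar")                   // a jar on the driver's local file system
 *   sc.addJar("ivy://org.example:example-lib:1.0.0")  // resolve via Ivy/Maven coordinates
 *   sc.listJars().foreach(println)
 * }}}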
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
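 *
 * An internal usage sketch (illustrative; the method is `private[spark]`, so it is only
 * reachable from Spark's own components):
 * {{{
 *   // e.g. from a scheduler or listener code path, where calling stop() inline could deadlock:
 *   sc.stopInNewThread()
 * }}}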
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with an exit code that will be passed to the scheduler backend. * In client mode, the client side may call `SparkContext.stop()` to clean up but then exit with * a non-zero code. This behavior causes resource schedulers such as `ApplicationMaster` to * exit with a success status even though the client side exited with a failure status. Spark can * call this method to stop the SparkContext and pass the client side's exit code to the * scheduler backend, which should then forward it to the corresponding resource scheduler * to keep the two consistent. * * @param exitCode Exit code that will be passed to the scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even after // this `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If none of these is set, return None.
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
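 *
 * A minimal usage sketch (assumes a running SparkContext `sc`; per the signature below, the
 * concrete return type is a `SimpleFutureAction`, which can be awaited like any Scala `Future`):
 * {{{
 *   val rdd = sc.parallelize(1 to 1000, numSlices = 4)
 *   val sizes = new Array[Int](4)
 *   val future = sc.submitJob[Int, Int, Array[Int]](
 *     rdd,
 *     (iter: Iterator[Int]) => iter.size,        // runs on each partition
 *     Seq(0, 1, 2, 3),                           // partitions to compute
 *     (index, size) => sizes(index) = size,      // called on the driver per partition result
 *     sizes)                                     // value produced when the job completes
 *   scala.concurrent.Await.ready(future, scala.concurrent.duration.Duration.Inf)
 * }}}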
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
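 *
 * A configuration sketch (the listener class name is illustrative): classes named in
 * `spark.extraListeners` are instantiated reflectively and registered before the bus starts.
 * {{{
 *   val conf = new SparkConf()
 *     .setAppName("app")
 *     .set("spark.extraListeners", "com.example.AuditSparkListener")
 *   val sc = new SparkContext(conf)   // the listener is registered during startup
 * }}}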
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConfig` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). 
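 *
 * A minimal usage sketch:
 * {{{
 *   // Returns the already-running SparkContext if one exists, otherwise creates one
 *   // from system properties (e.g. as set by spark-submit):
 *   val sc = SparkContext.getOrCreate()
 *
 *   // The SparkConf variant behaves the same; the conf only takes effect on first creation:
 *   val sc2 = SparkContext.getOrCreate(new SparkConf().setAppName("shared"))
 * }}}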
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
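 *
 * A minimal usage sketch (run from the driver program whose jar should be shipped; `this`
 * refers to the driver's own main object):
 * {{{
 *   val conf = new SparkConf().setAppName("my-job")
 *   // Add the jar containing the driver's own class, if it was loaded from one:
 *   conf.setJars(SparkContext.jarOfClass(this.getClass).toSeq)
 * }}}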
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
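 *
 * Master string forms recognized by `createTaskScheduler` (values are illustrative):
 * {{{
 *   local                        // run with a single thread
 *   local[4], local[*]           // N threads / one per core        (LOCAL_N_REGEX)
 *   local[4, 3]                  // N threads, 3 task failures      (LOCAL_N_FAILURES_REGEX)
 *   local-cluster[2, 1, 1024]    // workers, cores, memory in MiB   (LOCAL_CLUSTER_REGEX)
 *   spark://host:7077            // standalone cluster              (SPARK_REGEX)
 *   k8s://https://host:443       // Kubernetes                      (KUBERNETES_REGEX)
 * }}}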
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
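 *
 * A minimal usage sketch (assumes a running SparkContext `sc`; the output path is illustrative):
 * the implicit factories in the companion object let an RDD of plain Scala types be written as
 * Hadoop `Writable`s without explicit conversion.
 * {{{
 *   val pairs = sc.parallelize(Seq((1, "a"), (2, "b")))
 *   pairs.saveAsSequenceFile("hdfs:///tmp/pairs-seq")   // Int -> IntWritable, String -> Text
 * }}}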
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
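// Illustrative construction sketch (an aside, not part of this file): a SparkContext is
// typically created from a SparkConf, e.g.
//   val conf = new SparkConf().setMaster("local[2]").setAppName("example-app")
//   val sc   = new SparkContext(conf)
// or, equivalently, via the (master, appName, conf) convenience constructor defined below.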
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
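 *
 * A hedged sketch of the copy semantics (assumes an existing `sc`; the config key below is
 * purely illustrative):
 * {{{
 *   val copy = sc.getConf
 *   copy.set("spark.illustrative.key", "value")  // mutates only the clone, not the live context
 * }}}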
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
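// For reference, an illustrative user-side sketch (not part of the initialization sequence):
// the setLogLevel method defined above can be called on the constructed context, e.g.
//   sc.setLogLevel("WARN")   // valid levels: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN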
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
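// User-facing aside (an illustrative sketch, not part of the initialization sequence): the
// executor environment consulted above can be populated through SparkConf, e.g.
//   new SparkConf().setExecutorEnv("OMP_NUM_THREADS", "4")
// which stores the value under the spark.executorEnv.* prefix read by _conf.getExecutorEnv.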
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
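 *
 * A hedged usage sketch (assumes an existing `sc`; the tag name is illustrative):
 * {{{
 *   sc.addJobTag("nightly_etl")
 *   sc.getJobTags()              // Set("nightly_etl")
 *   sc.removeJobTag("nightly_etl")
 * }}}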
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. 
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
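 *
 * An illustrative sketch, equivalent to what `textFile` does internally (assumes an existing
 * `sc` and a hypothetical input path):
 * {{{
 *   import org.apache.hadoop.io.{LongWritable, Text}
 *   import org.apache.hadoop.mapred.TextInputFormat
 *   val pairs = sc.hadoopFile("hdfs://host/input.txt",
 *     classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
 *   val lines = pairs.map(_._2.toString)
 * }}}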
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newApiHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that user * don't need to pass them directly. Instead, callers can just write, for example: * ``` * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * ``` * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast. 
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
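 *
 * A hedged sketch (assumes an existing `sc`):
 * {{{
 *   val a = sc.parallelize(1 to 3)
 *   val b = sc.parallelize(4 to 6)
 *   sc.union(Seq(a, b)).collect()   // Array(1, 2, 3, 4, 5, 6)
 * }}}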
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
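 *
 * For reference, a minimal sketch of the public `broadcast` entry point that delegates here
 * (the lookup map is illustrative and `words` is an assumed `RDD[String]`):
 * {{{
 *   val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
 *   val counts = words.map(w => lookup.value.getOrElse(w, 0))
 * }}}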
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
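 *
 * A minimal sketch (the path below is hypothetical); on executors the downloaded copy is
 * located through `org.apache.spark.SparkFiles`:
 * {{{
 *   sc.addFile("hdfs:///data/lookup.txt")
 *   val localPath = SparkFiles.get("lookup.txt")
 * }}}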
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
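 *
 * A minimal sketch of registering a custom listener (the body of `onApplicationEnd` is
 * illustrative only):
 * {{{
 *   sc.addSparkListener(new SparkListener {
 *     override def onApplicationEnd(end: SparkListenerApplicationEnd): Unit =
 *       println(s"Application ended at ${end.time}")
 *   })
 * }}}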
 */ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be launched concurrently based on the resources the * given ResourceProfile could use, even if some of them are being used at the moment. * Note that you should not cache the value returned by this method, because the number can * change due to adding/removing executors. * * @param rp ResourceProfile to use to calculate max concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId-> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received.
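 *
 * A minimal sketch (the executor IDs are illustrative; in practice they would come from a
 * `SparkListener` or the Spark UI):
 * {{{
 *   sc.killExecutors(Seq("1", "2"))
 * }}}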
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
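 *
 * A minimal sketch that lists the persisted RDDs together with their storage levels:
 * {{{
 *   sc.getPersistentRDDs.foreach { case (id, rdd) =>
 *     println(s"RDD $id persisted at ${rdd.getStorageLevel}")
 *   }
 * }}}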
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
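 *
 * A minimal sketch (both paths are hypothetical):
 * {{{
 *   sc.addJar("hdfs:///libs/my-udfs.jar")          // fetched by every node
 *   sc.addJar("local:/opt/libs/preinstalled.jar")  // assumed to already exist on every worker
 * }}}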
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with exit code that will passed to scheduler backend. * In client mode, client side may call `SparkContext.stop()` to clean up but exit with * code not equal to 0. This behavior cause resource scheduler such as `ApplicationMaster` * exit with success status but client side exited with failed status. Spark can call * this method to stop SparkContext and pass client side correct exit code to scheduler backend. * Then scheduler backend should send the exit code to corresponding resource scheduler * to keep consistent. * * @param exitCode Specified exit code that will passed to scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If neither of these is set, return None. 
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
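 *
 * A minimal sketch that sums the first two partitions asynchronously (all values are
 * illustrative):
 * {{{
 *   val rdd = sc.parallelize(1 to 100, 4)
 *   val partials = new Array[Int](2)
 *   val future = sc.submitJob(
 *     rdd,
 *     (it: Iterator[Int]) => it.sum,
 *     Seq(0, 1),
 *     (index: Int, res: Int) => partials(index) = res,
 *     partials.sum)
 * }}}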
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
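 *
 * For example, extra listeners are typically configured before the context is created (the
 * listener class name below is hypothetical):
 * {{{
 *   val conf = new SparkConf().set("spark.extraListeners", "com.example.MyListener")
 * }}}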
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConfig` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). 
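 *
 * A minimal sketch (the second call returns the context created by the first):
 * {{{
 *   val sc1 = SparkContext.getOrCreate(new SparkConf().setAppName("app").setMaster("local[2]"))
 *   val sc2 = SparkContext.getOrCreate()
 * }}}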
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
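 *
 * A minimal sketch of the typical pattern in a driver program:
 * {{{
 *   val conf = new SparkConf().setJars(SparkContext.jarOfClass(this.getClass).toSeq)
 * }}}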
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
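 *
 * Examples of master strings these regexes are intended to match:
 * {{{
 *   local[4]                    // LOCAL_N_REGEX
 *   local[*, 3]                 // LOCAL_N_FAILURES_REGEX
 *   local-cluster[2, 1, 1024]   // LOCAL_CLUSTER_REGEX
 *   spark://host:7077           // SPARK_REGEX
 *   k8s://https://host:443      // KUBERNETES_REGEX
 * }}}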
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
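  // Illustrative sketch (not part of the original code; `sc` and the path are assumed to
  // exist): these zero-arg implicit converter functions are what SparkContext.sequenceFile
  // resolves when reading primitive types back out of a SequenceFile, e.g.
  //
  //   val counts: RDD[(String, Int)] = sc.sequenceFile[String, Int]("/tmp/example-seqfile")
  //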
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
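 *
 * A hedged usage sketch (the RDD contents and output path are illustrative): the implicit
 * factories below are what allow a pair RDD of primitives to be written out directly:
 * {{{
 *   val pairs = sc.parallelize(Seq(("a", 1), ("b", 2)))
 *   pairs.saveAsSequenceFile("/tmp/example-output")
 * }}}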
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
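 *
 * For example (values illustrative; this variant is package-private and used internally):
 * {{{
 *   new SparkContext("local[2]", "ExampleApp", "/opt/spark")
 * }}}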
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
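 *
 * For example (key illustrative), mutating the returned copy does not affect the running context:
 * {{{
 *   val confCopy = sc.getConf
 *   confCopy.set("spark.example.key", "value")  // changes only the local copy
 * }}}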
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
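 *
 * For example (tag name illustrative):
 * {{{
 *   sc.addJobTag("nightly-etl")
 *   sc.parallelize(1 to 100).count()  // this job is tagged "nightly-etl"
 *   sc.removeJobTag("nightly-etl")
 * }}}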
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. 
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
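 *
 * A short usage sketch (the path is illustrative):
 * {{{
 *   val lines = sc.hadoopFile("hdfs://namenode/logs", classOf[TextInputFormat],
 *     classOf[LongWritable], classOf[Text]).map(_._2.toString)
 * }}}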
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newApiHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that user * don't need to pass them directly. Instead, callers can just write, for example: * ``` * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * ``` * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast. 
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
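 * * For illustration, a minimal usage sketch (assumes `sparkContext` is this context and `rdd1`/`rdd2` * are hypothetical, existing RDDs of the same element type): * {{{ * val combined = sparkContext.union(Seq(rdd1, rdd2)) * }}}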
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw an exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with the given name. * * @note Accumulators must be registered before use, or it will throw an exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with an empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with an empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once.
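 * * For reference, the public `broadcast` counterpart above is typically used along these lines * (a minimal sketch; the `lookup` table and `ids` RDD are hypothetical): * {{{ * val lookup = sparkContext.broadcast(Map("a" -> 1, "b" -> 2)) * val resolved = ids.map(id => lookup.value.getOrElse(id, 0)) * }}}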
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
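 * * For illustration, a minimal sketch (the path below is hypothetical): * {{{ * sparkContext.addFile("hdfs:///data/lookup.txt") * // Inside a task, resolve the local copy of the file via SparkFiles: * val localPath = org.apache.spark.SparkFiles.get("lookup.txt") * }}}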
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
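 * * For illustration, a minimal sketch of a custom listener (the class name is hypothetical): * {{{ * class JobEndLogger extends SparkListener { * override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = * println(s"Job ${jobEnd.jobId} finished with ${jobEnd.jobResult}") * } * sparkContext.addSparkListener(new JobEndLogger) * }}}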
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the maximum number of tasks that could be launched concurrently based on the given * ResourceProfile, even if some of those task slots are currently in use. * Note: do not cache the value returned by this method, because the number can change as * executors are added or removed. * * @param rp ResourceProfile which to use to calculate max concurrent tasks. * @return The max number of tasks that can currently be launched concurrently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId-> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received.
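 * * For illustration, a minimal sketch (the executor IDs below are hypothetical): * {{{ * sparkContext.killExecutors(Seq("1", "2")) * }}}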
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
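 * * For illustration, a minimal sketch: * {{{ * val cached = sparkContext.parallelize(1 to 100).cache() * cached.count() * val persisted = sparkContext.getPersistentRDDs // should now contain an entry for cached.id * }}}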
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
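 * * For illustration, a minimal sketch (the JAR path below is hypothetical): * {{{ * sparkContext.addJar("hdfs:///deps/my-udfs.jar") * }}}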
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with an exit code that will be passed to the scheduler backend. * In client mode, the client side may call `SparkContext.stop()` to clean up but exit with * a code not equal to 0. This behavior causes resource schedulers such as `ApplicationMaster` * to exit with a success status even though the client side exited with a failed status. Spark can call * this method to stop the SparkContext and pass the client side's correct exit code to the scheduler backend. * The scheduler backend should then send the exit code to the corresponding resource scheduler * to keep them consistent. * * @param exitCode Specified exit code that will be passed to the scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even after this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If none of these is set, return None.
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
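 * * For illustration, a minimal sketch that sums each partition asynchronously (the RDD below is * hypothetical): * {{{ * val rdd = sparkContext.parallelize(1 to 100, 4) * val future = sparkContext.submitJob( * rdd, (it: Iterator[Int]) => it.sum, 0 until 4, (_: Int, _: Int) => (), "done") * }}}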
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
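 * * For illustration, extra listeners are typically supplied through configuration before the * context starts (a minimal sketch; the listener class name is hypothetical): * {{{ * new SparkConf().set("spark.extraListeners", "com.example.JobEndLogger") * }}}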
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s" The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConf` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving).
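 * * For illustration, a minimal sketch: * {{{ * val sc = SparkContext.getOrCreate() * // later calls from anywhere in the same JVM return the same instance * val same = SparkContext.getOrCreate() * }}}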
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
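 * * For illustration, a minimal sketch (the `MyJob` class is hypothetical): * {{{ * SparkContext.jarOfClass(classOf[MyJob]).foreach(sparkContext.addJar) * }}}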
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
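 * For illustration, these example master strings (values are placeholders) are matched by
 * the regexes below:
 * {{{
 *   local[4]                    // LOCAL_N_REGEX
 *   local[*, 3]                 // LOCAL_N_FAILURES_REGEX
 *   local-cluster[2, 1, 1024]   // LOCAL_CLUSTER_REGEX
 *   spark://host:7077           // SPARK_REGEX
 *   k8s://https://host:6443     // KUBERNETES_REGEX
 * }}}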
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
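 * For example (illustrative; `spark.foo` is a placeholder key), mutating the returned copy
 * leaves the running context untouched:
 * {{{
 *   val confCopy = sc.getConf
 *   confCopy.set("spark.foo", "bar")   // affects only the copy
 * }}}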
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
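    // For illustration (with a placeholder bucket name): if the user sets only
    //   spark.hadoop.fs.s3a.bucket.mybucket.committer.magic.enabled=true
    // the call below fills in the remaining S3A magic committer settings that were not set
    // explicitly.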
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
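 * For example (illustrative; "daily-report" is an arbitrary tag):
 * {{{
 *   sc.addJobTag("daily-report")
 *   // actions run on this thread now start jobs carrying the tag
 *   sc.removeJobTag("daily-report")
 * }}}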
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. 
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
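 * For example (illustrative; the path is a placeholder), reading a text file through the
 * old MapReduce API and keeping only the line contents:
 * {{{
 *   val lines = sc.hadoopFile("hdfs://nn/data/input",
 *     classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
 *     .map { case (_, text) => text.toString }
 * }}}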
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
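 *
 * A minimal sketch of the copy-with-`map` pattern recommended above, assuming an active
 * `SparkContext` named `sc`; the path is illustrative only:
 * {{{
 *   import org.apache.hadoop.io.{LongWritable, Text}
 *   import org.apache.hadoop.mapred.TextInputFormat
 *
 *   val raw = sc.hadoopFile[LongWritable, Text, TextInputFormat]("hdfs://path/to/data")
 *   // Materialize immutable copies before caching, sorting or aggregating.
 *   val safe = raw.map { case (offset, text) => (offset.get(), text.toString) }
 *   safe.cache()
 * }}}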
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newApiHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that user * don't need to pass them directly. Instead, callers can just write, for example: * ``` * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * ``` * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast. 
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
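 *
 * A minimal sketch, assuming an active `SparkContext` named `sc`:
 * {{{
 *   val a = sc.parallelize(1 to 3)
 *   val b = sc.parallelize(4 to 6)
 *   sc.union(Seq(a, b)).collect()   // Array(1, 2, 3, 4, 5, 6)
 * }}}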
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
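 *
 * A minimal sketch of the public `broadcast` wrapper above, assuming an active `SparkContext`
 * named `sc`:
 * {{{
 *   val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
 *   sc.parallelize(Seq("a", "b", "c")).map(k => lookup.value.getOrElse(k, 0)).collect()
 * }}}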
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
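 *
 * A minimal sketch, assuming an active `SparkContext` named `sc`; paths and file names are
 * illustrative only:
 * {{{
 *   sc.addFile("hdfs://some-path/lookup.txt")            // a single file
 *   sc.addFile("hdfs://some-path/config-dir", true)      // a directory, recursive = true
 *   sc.parallelize(1 to 2).map { _ =>
 *     // On executors, resolve the locally downloaded copy via SparkFiles.
 *     org.apache.spark.SparkFiles.get("lookup.txt")
 *   }.collect()
 * }}}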
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
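 *
 * A minimal sketch, assuming an active `SparkContext` named `sc`:
 * {{{
 *   import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}
 *
 *   sc.addSparkListener(new SparkListener {
 *     override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
 *       println(s"Job ${jobEnd.jobId} ended with ${jobEnd.jobResult}")
 *   })
 * }}}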
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be concurrent launched based on the ResourceProfile * could be used, even if some of them are being used at the moment. * Note that please don't cache the value returned by this method, because the number can change * due to add/remove executors. * * @param rp ResourceProfile which to use to calculate max concurrent tasks. * @return The max number of tasks that can be concurrent launched currently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have a locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId-> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. 
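 *
 * A minimal sketch, assuming an active `SparkContext` named `sc`, dynamic allocation disabled,
 * and a cluster manager that supports executor allocation; the executor IDs are illustrative:
 * {{{
 *   // Executor IDs can be obtained from the Spark UI or a SparkListener.
 *   sc.killExecutors(Seq("1", "2"))
 * }}}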
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
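 *
 * A minimal sketch, assuming an active `SparkContext` named `sc`:
 * {{{
 *   val nums = sc.parallelize(1 to 10).cache()
 *   nums.count()                                         // materializes the cached partitions
 *   sc.getPersistentRDDs.foreach { case (id, rdd) =>
 *     println(s"RDD $id persisted at ${rdd.getStorageLevel}")
 *   }
 * }}}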
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
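 *
 * A minimal sketch, assuming an active `SparkContext` named `sc`; the jar path is
 * illustrative only:
 * {{{
 *   sc.addJar("/path/to/extra-udfs.jar")
 *   sc.listJars().foreach(println)
 * }}}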
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with exit code that will passed to scheduler backend. * In client mode, client side may call `SparkContext.stop()` to clean up but exit with * code not equal to 0. This behavior cause resource scheduler such as `ApplicationMaster` * exit with success status but client side exited with failed status. Spark can call * this method to stop SparkContext and pass client side correct exit code to scheduler backend. * Then scheduler backend should send the exit code to corresponding resource scheduler * to keep consistent. * * @param exitCode Specified exit code that will passed to scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If neither of these is set, return None. 
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
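 *
 * A minimal sketch, assuming an active `SparkContext` named `sc`:
 * {{{
 *   val rdd = sc.parallelize(1 to 100, 4)
 *   val future = sc.submitJob(
 *     rdd,
 *     (it: Iterator[Int]) => it.sum,
 *     Seq(0, 1),                                         // only the first two partitions
 *     (index: Int, partial: Int) => println(s"partition $index -> $partial"),
 *     ())                                                // resultFunc: nothing extra to return
 *   // The returned SimpleFutureAction can be awaited or cancelled.
 * }}}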
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
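 *
 * A minimal sketch of the user-facing side, assuming `spark.extraListeners` is set to a
 * listener class available on the classpath (the class name below is hypothetical):
 * {{{
 *   val conf = new SparkConf()
 *     .setAppName("app")
 *     .setMaster("local[*]")
 *     .set("spark.extraListeners", "com.example.MyListener")
 *   val sc = new SparkContext(conf)
 * }}}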
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConfig` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). 
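 *
 * A minimal sketch:
 * {{{
 *   val sc = SparkContext.getOrCreate()
 *   // Or, when a context may need to be created with specific settings:
 *   val same = SparkContext.getOrCreate(new SparkConf().setAppName("app").setMaster("local[*]"))
 * }}}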
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
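 *
 * For example (an illustrative sketch; `MyJob` stands in for a user-defined class):
 * {{{
 *   val conf = new SparkConf().setJars(SparkContext.jarOfClass(classOf[MyJob]).toSeq)
 * }}}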
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
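 *
 * For example, the regexes below recognize master strings such as `local[4]`, `local[*]`,
 * `local[4, 2]`, `local-cluster[2, 1, 1024]`, `spark://host:7077` and
 * `k8s://https://host:443` (the hosts, ports and counts shown here are illustrative values).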
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
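  // For example (illustrative; `path` is a placeholder), a call such as
  // `sc.sequenceFile[Int, String](path)` picks up `() => WritableConverter[Int]` and
  // `() => WritableConverter[String]` from the implicit vals declared below.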
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
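 *
 * For example (an illustrative sketch; the output path is a placeholder), the implicit
 * factories defined below let a pair RDD be written as a sequence file without constructing
 * Writables by hand:
 * {{{
 *   sc.parallelize(Seq(1 -> "a", 2 -> "b")).saveAsSequenceFile("/tmp/seq-example")
 * }}}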
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/package org.apache.sparkimport java.io._import java.net.URIimport java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}import javax.ws.rs.core.UriBuilderimport scala.collection.JavaConverters._import scala.collection.Mapimport scala.collection.concurrent.{Map => ScalaConcurrentMap}import scala.collection.immutableimport scala.collection.mutable.HashMapimport scala.language.implicitConversionsimport scala.reflect.{classTag, ClassTag}import scala.util.control.NonFatalimport com.google.common.collect.MapMakerimport org.apache.hadoop.conf.Configurationimport org.apache.hadoop.fs.{FileSystem, Path}import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}import org.apache.spark.annotation.{DeveloperApi, Experimental}import org.apache.spark.broadcast.Broadcastimport org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}import org.apache.spark.errors.SparkCoreErrorsimport org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}import org.apache.spark.internal.Loggingimport org.apache.spark.internal.config._import org.apache.spark.internal.config.Tests._import org.apache.spark.internal.config.UI._import org.apache.spark.internal.plugin.PluginContainerimport org.apache.spark.io.CompressionCodecimport org.apache.spark.launcher.JavaModuleOptionsimport org.apache.spark.metrics.source.JVMCPUSourceimport org.apache.spark.partial.{ApproximateEvaluator, PartialResult}import org.apache.spark.rdd._import org.apache.spark.resource._import org.apache.spark.resource.ResourceUtils._import org.apache.spark.rpc.RpcEndpointRefimport org.apache.spark.scheduler._import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackendimport org.apache.spark.scheduler.local.LocalSchedulerBackendimport org.apache.spark.shuffle.ShuffleDataIOUtilsimport org.apache.spark.shuffle.api.ShuffleDriverComponentsimport org.apache.spark.status.{AppStatusSource, AppStatusStore}import org.apache.spark.status.api.v1.ThreadStackTraceimport org.apache.spark.storage._import org.apache.spark.storage.BlockManagerMessages.{TriggerHeapHistogram, TriggerThreadDump}import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}import org.apache.spark.util._import org.apache.spark.util.logging.DriverLogger/** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. 
private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. */ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. 
*/ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") logInfo(s"OS info ${System.getProperty("os.name")}, ${System.getProperty("os.version")}, " + s"${System.getProperty("os.arch")}") logInfo(s"Java version ${System.getProperty("java.version")}") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. | * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. 
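 *
 * Because a copy is returned, mutating it does not affect the running context
 * (a minimal sketch, assuming an active `sc`; the key and value are placeholders):
 * {{{
 *   val copy = sc.getConf
 *   copy.set("spark.some.key", "some-value")  // changes only the copy
 *   // The running context is unaffected (assuming the placeholder key was never set on it).
 *   assert(sc.getConf.getOption("spark.some.key").isEmpty)
 * }}}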
*/ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv( conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf), this) } private[spark] def env: SparkEnv = _env // Used to store session UUID with a URL for each static file/jar together and // the file's local timestamp. It's session uuid -> (URL -> timestamp). private[spark] val addedFiles = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedArchives = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] val addedJars = new ConcurrentHashMap[ String, ScalaConcurrentMap[String, Long]]().asScala private[spark] def allAddedFiles = addedFiles.values.flatten.toMap private[spark] def allAddedArchives = addedArchives.values.flatten.toMap private[spark] def allAddedJars = addedJars.values.flatten.toMap // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevelIfNeeded(upperCased) if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL) && _schedulerBackend != null) { _schedulerBackend.updateExecutorsLogLevel(upperCased) } } try { _conf = config.clone() _conf.get(SPARK_LOG_LEVEL).foreach { level => if (Logging.setLogLevelPrinted) { System.err.printf("Setting Spark log level to \"%s\".\n", level) } setLogLevel(level) } _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } // This should be set as early as possible. 
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf) SparkContext.supplementJavaModuleOptions(_conf) SparkContext.supplementJavaIPv6Options(_conf) _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. 
If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. _hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (allAddedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", allAddedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (allAddedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", allAddedFiles.keys.toSeq.mkString(",")) } } if (archives != null) { archives.foreach(file => addFile(file, false, true, isArchive = true)) if (allAddedArchives.nonEmpty) { _conf.set("spark.app.initial.archive.urls", allAddedArchives.keys.toSeq.mkString(",")) } } _executorMemory = SparkContext.executorMemoryInMb(_conf) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser if (_conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) { // if OMP_NUM_THREADS is not explicitly set, override it with the value of "spark.task.cpus" // SPARK-41188: limit the thread number for OpenBLAS routine to the number of cores assigned // to this executor because some spark ML algorithms calls OpenBlAS via netlib-java // SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor // this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool // see https://github.com/numpy/numpy/issues/10455 executorEnvs.put("OMP_NUM_THREADS", _conf.get("spark.task.cpus", "1")) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. 
_plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) if (_conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) { _conf.get(SPARK_LOG_LEVEL) .foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel)) } val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) _applicationAttemptId.foreach { attemptId => _conf.set(APP_ATTEMPT_ID, attemptId) _env.blockManager.blockStoreClient.setAppAttemptId(attemptId) } // initialize after application id and attempt id has been initialized _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(_conf).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } if (_conf.get(UI_REVERSE_PROXY)) { val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL).getOrElse("").stripSuffix("/") System.setProperty("spark.ui.proxyBase", proxyUrl + "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager, reliableShuffleStorage = _shuffleDriverComponents.supportsReliableStorage())) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // After application started, attach handlers to started server and start handler. _ui.foreach(_.attachAllHandlers()) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) // Make sure the context is stopped if the user forgets about it. 
This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } // Post init _taskScheduler.postStartHook() if (isLocal) { _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request thread dump from it.") None } } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } /** * Called by the web UI to obtain executor heap histogram. */ private[spark] def getExecutorHeapHistogram(executorId: String): Option[Array[String]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getHeapHistogram()) } else { env.blockManager.master.getExecutorEndpointRef(executorId) match { case Some(endpointRef) => Some(endpointRef.askSync[Array[String]](TriggerHeapHistogram)) case None => logWarning(s"Executor $executorId might already have stopped and " + "can not request heap histogram from it.") None } } } catch { case e: Exception => logError(s"Exception getting heap histogram from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. 
The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Set the behavior of job cancellation from jobs started in this thread. * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. * * @since 3.5.0 */ def setInterruptOnCancel(interruptOnCancel: Boolean): Unit = { setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** * Add a tag to be assigned to all the jobs started by this thread. * * @param tag The tag to be added. Cannot contain ',' (comma) character. 
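 *
 * For example (an illustrative sketch; the tag name is a placeholder):
 * {{{
 *   sc.addJobTag("nightly-etl")
 *   // ...jobs started by this thread now carry the "nightly-etl" tag...
 *   sc.removeJobTag("nightly-etl")
 * }}}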
* * @since 3.5.0 */ def addJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags + tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } /** * Remove a tag previously added to be assigned to all the jobs started by this thread. * Noop if such a tag was not added earlier. * * @param tag The tag to be removed. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def removeJobTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) val existingTags = getJobTags() val newTags = (existingTags - tag).mkString(SparkContext.SPARK_JOB_TAGS_SEP) if (newTags.isEmpty) { clearJobTags() } else { setLocalProperty(SparkContext.SPARK_JOB_TAGS, newTags) } } /** * Get the tags that are currently set to be assigned to all the jobs started by this thread. * * @since 3.5.0 */ def getJobTags(): Set[String] = { Option(getLocalProperty(SparkContext.SPARK_JOB_TAGS)) .map(_.split(SparkContext.SPARK_JOB_TAGS_SEP).toSet) .getOrElse(Set()) .filter(!_.isEmpty) // empty string tag should not happen, but be defensive } /** * Clear the current thread's job tags. * * @since 3.5.0 */ def clearJobTags(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_TAGS, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. 
* @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. * @param seq list of tuples of data and location preferences (hostnames of Spark nodes) * @return RDD representing data partitioned according to location preferences */ def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope { assertNotStopped() val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs) } /** * Read a text file from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI, and return it as an RDD of Strings. * The text files must be encoded as UTF-8. * * @param path path to the text file on a supported file system * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of lines of the text file */ def textFile( path: String, minPartitions: Int = defaultMinPartitions): RDD[String] = withScope { assertNotStopped() hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], minPartitions).map(pair => pair._2.toString).setName(path) } /** * Read a directory of text files from HDFS, a local file system (available on all nodes), or any * Hadoop-supported file system URI. 
Each file is read as a single record and returned in a * key-value pair, where the key is the path of each file, the value is the content of each file. * The text files must be encoded as UTF-8. * * <p> For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`, * * <p> then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred, large file is also allowable, but may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and the corresponding file content */ def wholeTextFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new WholeTextFileRDD( this, classOf[WholeTextFileInputFormat], classOf[Text], classOf[Text], updateConf, minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path) } /** * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file * (useful for binary data) * * For example, if you have the following files: * {{{ * hdfs://a-hdfs-path/part-00000 * hdfs://a-hdfs-path/part-00001 * ... * hdfs://a-hdfs-path/part-nnnnn * }}} * * Do * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`, * * then `rdd` contains * {{{ * (a-hdfs-path/part-00000, its content) * (a-hdfs-path/part-00001, its content) * ... * (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. 
(see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
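 *
 * For illustration only, a minimal sketch (assuming an existing `sc: SparkContext` and a
 * hypothetical input path) that materializes the values as Strings so the reused Writable
 * objects are never cached directly:
 * {{{
 *   import org.apache.hadoop.io.{LongWritable, Text}
 *   import org.apache.hadoop.mapred.TextInputFormat
 *
 *   val lines = sc.hadoopFile("hdfs://some-path/data.txt",
 *       classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
 *     .map { case (_, text) => text.toString }  // copy out of the reused Text object
 * }}}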
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]] (path: String, minPartitions: Int) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile(path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]], minPartitions) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
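 *
 * As a hedged sketch of the copy-before-cache advice above (`path` and `minPartitions` stand in
 * for the parameters below):
 * {{{
 *   val cached = sc.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions)
 *     .map { case (offset, text) => (offset.get, text.toString) }  // copy the Writables first
 *     .cache()
 * }}}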
* @param path directory to the input data files, the path can be comma separated paths as * a list of inputs * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V, F <: InputFormat[K, V]](path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { hadoopFile[K, V, F](path, defaultMinPartitions) } /** * Smarter version of `newApiHadoopFile` that uses class tags to figure out the classes of keys, * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that user * don't need to pass them directly. Instead, callers can just write, for example: * ``` * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path) * ``` * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]] (path: String) (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope { newAPIHadoopFile( path, fm.runtimeClass.asInstanceOf[Class[F]], km.runtimeClass.asInstanceOf[Class[K]], vm.runtimeClass.asInstanceOf[Class[V]]) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast. 
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. 
For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. 
*/ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { broadcastInternal(value, serializedOnly = false) } /** * Internal version of broadcast - broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each executor only once. 
* * @param value value to broadcast to the Spark nodes * @param serializedOnly if true, do not cache the unserialized value on the driver * @return `Broadcast` object, a read-only variable cached on each machine */ private[spark] def broadcastInternal[T: ClassTag]( value: T, serializedOnly: Boolean): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal, serializedOnly) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = allAddedFiles.keySet.toSeq /** * :: Experimental :: * Add an archive to be downloaded and unpacked with this Spark job on every node. * * If an archive is added during execution, it will not be available until the next TaskSet * starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. * * @note A path can be added only once. Subsequent additions of the same path are ignored. * * @since 3.1.0 */ @Experimental def addArchive(path: String): Unit = { addFile(path, false, false, isArchive = true) } /** * :: Experimental :: * Returns a list of archive paths that are added to resources. * * @since 3.1.0 */ @Experimental def listArchives(): Seq[String] = allAddedArchives.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile( path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false ): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") val uri = Utils.resolveURI(path) val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw SparkCoreErrors.addLocalDirectoryError(hadoopPath) } if (!recursive && isDir) { throw SparkCoreErrors.addDirectoryError(hadoopPath) } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else if (uri.getScheme == null) { schemeCorrectedURI.toString } else { uri.toString } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis // If the session ID was specified from SparkSession, it's from a Spark Connect client. // Specify a dedicated directory for Spark Connect client. // We're running Spark Connect as a service so regular PySpark path // is not affected. lazy val root = if (jobArtifactUUID != "default") { val newDest = new File(SparkFiles.getRootDirectory(), jobArtifactUUID) newDest.mkdir() newDest } else { new File(SparkFiles.getRootDirectory()) } if ( !isArchive && addedFiles .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, root, conf, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else if ( isArchive && addedArchives .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent( UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, timestamp).isEmpty) { logInfo(s"Added archive $path at $key with timestamp $timestamp") // If the scheme is file, use URI to simply copy instead of downloading. val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) val dest = new File( root, if (uri.getFragment != null) uri.getFragment else source.getName) logInfo( s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") Utils.deleteRecursively(dest) Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. 
*/ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be concurrent launched based on the ResourceProfile * could be used, even if some of them are being used at the moment. * Note that please don't cache the value returned by this method, because the number can change * due to add/remove executors. * * @param rp ResourceProfile which to use to calculate max concurrent tasks. * @return The max number of tasks that can be concurrent launched currently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. The cluster manager * shouldn't kill any running executor to reach this number, but, * if all existing executors were to die, this is the number of executors * we'd want to be allocated. * @param localityAwareTasks The number of tasks in all active stages that have a locality * preferences. This includes running, pending, and completed tasks. * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages * that would like to like to run on that host. * This includes running, pending, and completed tasks. * @return whether the request is acknowledged by the cluster manager. */ @DeveloperApi def requestTotalExecutors( numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: immutable.Map[String, Int] ): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => // this is being applied to the default resource profile, would need to add api to support // others val defaultProfId = resourceProfileManager.defaultResourceProfile.id b.requestTotalExecutors(immutable.Map(defaultProfId-> numExecutors), immutable.Map(localityAwareTasks -> defaultProfId), immutable.Map(defaultProfId -> hostToLocalTaskCount)) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request an additional number of executors from the cluster manager. * @return whether the request is received. */ @DeveloperApi def requestExecutors(numAdditionalExecutors: Int): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.requestExecutors(numAdditionalExecutors) case _ => logWarning("Requesting executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executors. * * This is not supported when dynamic allocation is turned on. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executors it kills * through this method with new ones, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. 
*/ @DeveloperApi def killExecutors(executorIds: Seq[String]): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => require(executorAllocationManager.isEmpty, "killExecutors() unsupported with Dynamic Allocation turned on") b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** * :: DeveloperApi :: * Request that the cluster manager kill the specified executor. * * @note This is an indication to the cluster manager that the application wishes to adjust * its resource usage downwards. If the application wishes to replace the executor it kills * through this method with a new one, it should follow up explicitly with a call to * {{SparkContext#requestExecutors}}. * * @return whether the request is received. */ @DeveloperApi def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId)) /** * Request that the cluster manager kill the specified executor without adjusting the * application resource requirements. * * The effect is that a new executor will be launched in place of the one killed by * this request. This assumes the cluster manager will automatically and eventually * fulfill all missing application resource requests. * * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { val jobArtifactUUID = JobArtifactSet .getCurrentJobArtifactState.map(_.uuid).getOrElse("default") def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp", "spark").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) Nil } } else { Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception (addLocalJarFile(new File(path)), "local") } else { val uri = Utils.resolveURI(path) // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) val uriScheme = uri.getScheme val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => Seq("file:" + uri.getPath) case "ivy" => // Since `new Path(path).toUri` will lose query information, // so here we use `URI.create(path)` DependencyUtils.resolveMavenDependencies(URI.create(path)) .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } (jarPaths, uriScheme) } if (keys.nonEmpty) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis val (added, existed) = keys.partition(addedJars .getOrElseUpdate(jobArtifactUUID, new ConcurrentHashMap[String, Long]().asScala) .putIfAbsent(_, timestamp).isEmpty) if (added.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() } if (existed.nonEmpty) { val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" logWarning(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + " Overwriting of added jar is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = allAddedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. 
*/ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = stop(0) /** * Shut down the SparkContext with exit code that will passed to scheduler backend. * In client mode, client side may call `SparkContext.stop()` to clean up but exit with * code not equal to 0. This behavior cause resource scheduler such as `ApplicationMaster` * exit with success status but client side exited with failed status. Spark can call * this method to stop SparkContext and pass client side correct exit code to scheduler backend. * Then scheduler backend should send the exit code to corresponding resource scheduler * to keep consistent. * * @param exitCode Specified exit code that will passed to scheduler backend in client mode. */ def stop(exitCode: Int): Unit = { logInfo(s"SparkContext is stopping with exitCode $exitCode.") if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop(exitCode) } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { FallbackStorage.cleanUp(_conf, _hadoopConfiguration) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If neither of these is set, return None. 
*/ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. 
for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, rdd.partitions.indices) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. 
* * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** * Cancel active jobs that have the specified tag. See `org.apache.spark.SparkContext.addJobTag`. * * @param tag The tag to be cancelled. Cannot contain ',' (comma) character. * * @since 3.5.0 */ def cancelJobsWithTag(tag: String): Unit = { SparkContext.throwIfInvalidTag(tag) assertNotStopped() dagScheduler.cancelJobsWithTag(tag) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. 
* @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). 
*/ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = allAddedJars.keys.toSeq val addedFilePaths = allAddedFiles.keys.toSeq val addedArchivePaths = allAddedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths, env.metricsSystem.metricsProperties.asScala.toMap) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. */ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this)}/** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */object SparkContext extends Logging { private[spark] val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. 
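 *
 * (Editorial note: illustrative sketch, not part of the original Scaladoc. Because at most
 * one fully-constructed SparkContext can be active per JVM, callers typically reuse it:)
 * {{{
 *   val sc1 = SparkContext.getOrCreate(new SparkConf().setMaster("local").setAppName("demo"))
 *   val sc2 = SparkContext.getOrCreate()  // returns the already-active context
 *   assert(sc1 eq sc2)
 * }}}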
*/ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. */ private def assertOnDriver(): Unit = { if (Utils.isInRunningSparkTask) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConfig` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). 
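 *
 * (Editorial note: illustrative usage sketch, not part of the original Scaladoc.)
 * {{{
 *   // e.g. inside a library that must never construct its own context:
 *   val sc = SparkContext.getOrCreate()
 * }}}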
* * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. */ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_JOB_TAGS = "spark.job.tags" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" /** Separator of tags in SPARK_JOB_TAGS property */ private[spark] val SPARK_JOB_TAGS_SEP = "," // Same rules apply to Spark Connect execution tags, see ExecuteHolder.throwIfInvalidTag private[spark] def throwIfInvalidTag(tag: String) = { if (tag == null) { throw new IllegalArgumentException("Spark job tag cannot be null.") } if (tag.contains(SPARK_JOB_TAGS_SEP)) { throw new IllegalArgumentException( s"Spark job tag cannot contain '$SPARK_JOB_TAGS_SEP'.") } if (tag.isEmpty) { throw new IllegalArgumentException( "Spark job tag cannot be an empty string.") } } private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. 
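 *
 * (Editorial note: illustrative sketch, not part of the original Scaladoc; `MyApp` is a
 * hypothetical driver class.)
 * {{{
 *   val conf = new SparkConf()
 *     .setAppName("jar-demo")
 *     .setJars(SparkContext.jarOfClass(classOf[MyApp]).toSeq)
 * }}}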
* * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. */ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } private[spark] def executorMemoryInMb(conf: SparkConf): Int = { conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) } private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. 
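    // (Editorial note, illustrative only and not part of the original source: with, say,
    //   --conf spark.executor.cores=2 --conf spark.task.cpus=4
    // the helper defined just below is expected to reject the configuration, because no
    // single task could ever fit on one executor.)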
// This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
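        // (Editorial note, illustrative only and not part of the original source: a master
        // string such as "local-cluster[2, 1, 1024]" reaches this branch and parses as
        // numWorkers = 2, coresPerWorker = 1, memoryPerWorker = 1024 MiB, so requesting more
        // than 1024 MiB per executor fails the check above. The next line applies the
        // host-local disk reading default discussed in the preceding comment.)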
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } /** * This is a helper function to complete the missing S3A magic committer configurations * based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled` */ private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = { val magicCommitterConfs = conf .getAllWithPrefix("spark.hadoop.fs.s3a.bucket.") .filter(_._1.endsWith(".committer.magic.enabled")) .filter(_._2.equalsIgnoreCase("true")) if (magicCommitterConfs.nonEmpty) { // Try to enable S3 magic committer if missing conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true") if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) { conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic") conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a", "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory") conf.setIfMissing("spark.sql.parquet.output.committer.class", "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter") conf.setIfMissing("spark.sql.sources.commitProtocolClass", "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol") } } } /** * SPARK-36796: This is a helper function to supplement `--add-opens` options to * `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`. */ private def supplementJavaModuleOptions(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts" case None => JavaModuleOptions.defaultModuleOptions() } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) } private def supplementJavaIPv6Options(conf: SparkConf): Unit = { def supplement(key: OptionalConfigEntry[String]): Unit = { val v = conf.get(key) match { case Some(opts) => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6} $opts" case None => s"-Djava.net.preferIPv6Addresses=${Utils.preferIPv6}" } conf.set(key.key, v) } supplement(DRIVER_JAVA_OPTIONS) supplement(EXECUTOR_JAVA_OPTIONS) }}/** * A collection of regexes for extracting information from the master string. 
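 *
 * (Editorial note: illustrative extraction examples, not part of the original Scaladoc,
 * assuming the regexes defined below.)
 * {{{
 *   "local[8]"                  LOCAL_N_REGEX          => threads = "8"
 *   "local[*, 2]"               LOCAL_N_FAILURES_REGEX => threads = "*", maxFailures = "2"
 *   "local-cluster[2, 1, 1024]" LOCAL_CLUSTER_REGEX    => 2 workers, 1 core, 1024 MiB each
 *   "spark://10.0.0.1:7077"     SPARK_REGEX            => "10.0.0.1:7077"
 *   "k8s://https://api.example" KUBERNETES_REGEX       => "https://api.example"
 * }}}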
*/private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r}/** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializableobject WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. // The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. 
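  // (Editorial note, illustrative only and not part of the original source; the path below is
  // hypothetical. These implicit converters are what let SequenceFile reads use plain Scala
  // types instead of Hadoop Writables, e.g.
  //   val counts: RDD[(String, Int)] = sc.sequenceFile[String, Int]("hdfs:///data/counts.seq")
  // where the String and Int converters are resolved from this object.)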
implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer than data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])}/** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
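 *
 * (Editorial note: illustrative sketch, not part of the original Scaladoc; the output path is
 * hypothetical. The implicit factories in the companion object below let an RDD of plain
 * Scala pairs be written as a SequenceFile:)
 * {{{
 *   sc.parallelize(Seq(("a", 1), ("b", 2))).saveAsSequenceFile("hdfs:///tmp/pairs.seq")
 * }}}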
*/private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializableobject WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w)} </s>