spark HadoopConfDriverFeatureStep source code
File path: /resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/HadoopConfDriverFeatureStep.scala
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.deploy.k8s.features

import java.io.File
import java.nio.charset.StandardCharsets

import scala.collection.JavaConverters._

import com.google.common.io.Files
import io.fabric8.kubernetes.api.model._

import org.apache.spark.deploy.k8s.{KubernetesConf, KubernetesUtils, SparkPod}
import org.apache.spark.deploy.k8s.Config._
import org.apache.spark.deploy.k8s.Constants._
/**
 * Mounts the Hadoop configuration - either a pre-defined config map, or a local configuration
 * directory - on the driver pod.
 */
private[spark] class HadoopConfDriverFeatureStep(conf: KubernetesConf)
  extends KubernetesFeatureConfigStep {

  private val confDir = Option(conf.sparkConf.getenv(ENV_HADOOP_CONF_DIR))
  private val existingConfMap = conf.get(KUBERNETES_HADOOP_CONF_CONFIG_MAP)

  KubernetesUtils.requireNandDefined(
    confDir,
    existingConfMap,
    "Do not specify both the `HADOOP_CONF_DIR` in your ENV and the ConfigMap " +
    "as the creation of an additional ConfigMap, when one is already specified is extraneous")
  private lazy val confFiles: Seq[File] = {
    val dir = new File(confDir.get)
    if (dir.isDirectory) {
      dir.listFiles.filter(_.isFile).toSeq
    } else {
      Nil
    }
  }

  private def newConfigMapName: String = s"${conf.resourceNamePrefix}-hadoop-config"

  private def hasHadoopConf: Boolean = confDir.isDefined || existingConfMap.isDefined
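
  // Note: SparkPod.transform applies the partial function only when its guard
  // (hasHadoopConf here) matches, so configurePod leaves the pod untouched
  // when neither a local conf dir nor an existing ConfigMap was given.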
  override def configurePod(original: SparkPod): SparkPod = {
    original.transform { case pod if hasHadoopConf =>
      val confVolume = if (confDir.isDefined) {
        // Local HADOOP_CONF_DIR: mount the ConfigMap created below in
        // getAdditionalKubernetesResources(), with one key per config file.
        val keyPaths = confFiles.map { file =>
          new KeyToPathBuilder()
            .withKey(file.getName())
            .withPath(file.getName())
            .build()
        }
        new VolumeBuilder()
          .withName(HADOOP_CONF_VOLUME)
          .withNewConfigMap()
            .withName(newConfigMapName)
            .withItems(keyPaths.asJava)
            .endConfigMap()
          .build()
      } else {
        // Pre-existing ConfigMap: mount it as-is.
        new VolumeBuilder()
          .withName(HADOOP_CONF_VOLUME)
          .withNewConfigMap()
            .withName(existingConfMap.get)
            .endConfigMap()
          .build()
      }

      val podWithConf = new PodBuilder(pod.pod)
        .editSpec()
          .addNewVolumeLike(confVolume)
            .endVolume()
          .endSpec()
        .build()

      val containerWithMount = new ContainerBuilder(pod.container)
        .addNewVolumeMount()
          .withName(HADOOP_CONF_VOLUME)
          .withMountPath(HADOOP_CONF_DIR_PATH)
          .endVolumeMount()
        .addNewEnv()
          .withName(ENV_HADOOP_CONF_DIR)
          .withValue(HADOOP_CONF_DIR_PATH)
          .endEnv()
        .build()

      SparkPod(podWithConf, containerWithMount)
    }
  }
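
  // In the HADOOP_CONF_DIR case, the ConfigMap referenced by the volume above
  // does not exist yet; it is created here from the contents of each local
  // config file and marked immutable.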
  override def getAdditionalKubernetesResources(): Seq[HasMetadata] = {
    if (confDir.isDefined) {
      val fileMap = confFiles.map { file =>
        (file.getName(), Files.toString(file, StandardCharsets.UTF_8))
      }.toMap.asJava

      Seq(new ConfigMapBuilder()
        .withNewMetadata()
          .withName(newConfigMapName)
          .endMetadata()
        .withImmutable(true)
        .addToData(fileMap)
        .build())
    } else {
      Nil
    }
  }
}
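For context, the step is driven entirely by submission-side settings. Below is a minimal sketch of the two mutually exclusive ways to trigger it, assuming a ConfigMap named my-hadoop-conf already exists in the target namespace (that name is illustrative):

import org.apache.spark.SparkConf

// Option 1: point the step at a pre-existing ConfigMap. The config key is the
// one behind KUBERNETES_HADOOP_CONF_CONFIG_MAP, i.e.
// spark.kubernetes.hadoop.configMapName.
val sparkConf = new SparkConf()
  .set("spark.kubernetes.hadoop.configMapName", "my-hadoop-conf")

// Option 2: export HADOOP_CONF_DIR in the environment that runs spark-submit.
// The step then reads every regular file in that directory, ships the contents
// as a new immutable ConfigMap, and mounts it at HADOOP_CONF_DIR_PATH in the
// driver container. Setting both options at once fails fast in the
// requireNandDefined guard above.

Either way, Hadoop client code running in the driver picks up the configuration through the HADOOP_CONF_DIR environment variable that configurePod sets on the container.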
Related articles
spark BasicDriverFeatureStep source code
spark BasicExecutorFeatureStep source code
spark DriverCommandFeatureStep source code
spark DriverKubernetesCredentialsFeatureStep source code
spark DriverServiceFeatureStep source code
spark EnvSecretsFeatureStep source code
spark ExecutorKubernetesCredentialsFeatureStep source code
spark KerberosConfDriverFeatureStep source code