case class DriverConfig(appName: String, parallelUploads: Int, parallelRetrievers: Int, numCommitPartitions: Int, sparkStorageLevels: SparkStorageLevelsConfig, state: StateConfig, disableIncremental: Boolean, uniquePartitionLimitInBytes: Int, disableCommitIntegrityCheck: Boolean, allowEmptyPayloads: Boolean, catalogs: CatalogsConfig) extends Product with Serializable
The configuration necessary to instantiate and configure a com.here.platform.data.processing.driver.Driver. A construction sketch follows the parameter list below.
- appName
The name of the application to be set in the Spark context.
- parallelUploads
The number of parallel uploads the library performs inside a Spark task when data is published to the Blob API.
- parallelRetrievers
The number of parallel retrievals the library performs inside a Spark task when data is retrieved from the Blob API.
- numCommitPartitions
The maximum number of parts to commit within a multipart commit to the Data API.
- sparkStorageLevels
The configuration of the Spark storage levels for each RDD category in the library.
- state
The configuration for the state layer that specifies how the layer is stored.
- disableIncremental
If true, incremental compilation is disabled.
- uniquePartitionLimitInBytes
The size limit in bytes beyond which partitions are considered unique. For partitions within this limit, the data handle of partitions with identical content is reused, so the same payload is not uploaded multiple times.
- disableCommitIntegrityCheck
If true, the final integrity check on the committed partitions is disabled.
- allowEmptyPayloads
Whether to allow publishing of empty (0-byte) payloads.
- catalogs
Catalog-specific driver configurations.
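Given an existing configuration, individual settings can be adjusted with the standard case-class copy method. A minimal sketch, assuming baseConfig is a hypothetical, already-constructed DriverConfig and that DriverConfig lives in the same package as the Driver named above (adjust the import to your SDK version):

```scala
import com.here.platform.data.processing.driver.DriverConfig

// `baseConfig` is a hypothetical, already-constructed DriverConfig;
// how it is obtained depends on your application's setup code.
def tuned(baseConfig: DriverConfig): DriverConfig =
  baseConfig.copy(
    appName = "my-batch-compiler", // name set in the Spark context
    parallelUploads = 8,           // parallel uploads per Spark task to the Blob API
    parallelRetrievers = 8,        // parallel retrievals per Spark task from the Blob API
    disableIncremental = false,    // keep incremental compilation enabled
    allowEmptyPayloads = false     // reject empty (0-byte) payloads
  )
```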
Linear Supertypes
- Serializable
- Product
- Equals
- AnyRef
- Any
Instance Constructors
- new DriverConfig(appName: String, parallelUploads: Int, parallelRetrievers: Int, numCommitPartitions: Int, sparkStorageLevels: SparkStorageLevelsConfig, state: StateConfig, disableIncremental: Boolean, uniquePartitionLimitInBytes: Int, disableCommitIntegrityCheck: Boolean, allowEmptyPayloads: Boolean, catalogs: CatalogsConfig)
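A minimal construction sketch, assuming storageLevels, stateConfig, and catalogsConfig are hypothetical, pre-built values of the nested configuration types (their construction is library-specific and not shown here), and that all configuration types share the Driver's package:

```scala
import com.here.platform.data.processing.driver._ // assumed package; adjust to your SDK version

def build(storageLevels: SparkStorageLevelsConfig,
          stateConfig: StateConfig,
          catalogsConfig: CatalogsConfig): DriverConfig =
  new DriverConfig(
    appName = "my-batch-compiler",
    parallelUploads = 4,
    parallelRetrievers = 4,
    numCommitPartitions = 100,                 // max parts per multipart commit
    sparkStorageLevels = storageLevels,        // hypothetical, pre-built value
    state = stateConfig,                       // hypothetical, pre-built value
    disableIncremental = false,
    uniquePartitionLimitInBytes = 1024 * 1024, // 1 MiB deduplication threshold
    disableCommitIntegrityCheck = false,
    allowEmptyPayloads = false,
    catalogs = catalogsConfig                  // hypothetical, pre-built value
  )
```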
Value Members
- val allowEmptyPayloads: Boolean
- val appName: String
- val catalogs: CatalogsConfig
- val disableCommitIntegrityCheck: Boolean
- val disableIncremental: Boolean
- val numCommitPartitions: Int
- val parallelRetrievers: Int
- val parallelUploads: Int
- def productElementNames: Iterator[String]
- Definition Classes
- Product
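Since DriverConfig is a case class, productElementNames (available on Scala 2.13+) can be paired with the standard productIterator to render the effective configuration, for example for logging. A sketch, where config is a hypothetical DriverConfig instance:

```scala
import com.here.platform.data.processing.driver.DriverConfig

// Renders e.g. "DriverConfig(appName = my-app, parallelUploads = 4, ...)".
def describe(config: DriverConfig): String =
  config.productElementNames       // field names: "appName", "parallelUploads", ...
    .zip(config.productIterator)   // paired with the corresponding field values
    .map { case (name, value) => s"$name = $value" }
    .mkString("DriverConfig(", ", ", ")")
```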
- val sparkStorageLevels: SparkStorageLevelsConfig
- val state: StateConfig
- val uniquePartitionLimitInBytes: Int