Class RawScan
java.lang.Object
com.here.platform.data.client.spark.datasources.raw.RawScan
- All Implemented Interfaces:
Serializable, org.apache.spark.internal.Logging, org.apache.spark.sql.connector.read.Batch, org.apache.spark.sql.connector.read.Scan, org.apache.spark.sql.connector.read.SupportsReportStatistics, org.apache.spark.sql.execution.datasources.v2.FileScan, org.apache.spark.sql.internal.connector.SupportsMetadata, scala.Equals, scala.Product
public class RawScan
extends Object
implements org.apache.spark.sql.execution.datasources.v2.FileScan, scala.Product, Serializable
- See Also:
-
Nested Class Summary
Nested classes/interfaces inherited from interface org.apache.spark.internal.Logging
org.apache.spark.internal.Logging.LogStringContext, org.apache.spark.internal.Logging.SparkShellLoggingFilter
Nested classes/interfaces inherited from interface org.apache.spark.sql.connector.read.Scan
org.apache.spark.sql.connector.read.Scan.ColumnarSupportMode
Constructor Summary
Constructors
Constructor / Description
RawScan(org.apache.spark.sql.SparkSession sparkSession, org.apache.spark.sql.execution.datasources.PartitioningAwareFileIndex fileIndex, org.apache.spark.sql.types.StructType dataSchema, org.apache.spark.sql.types.StructType readDataSchema, org.apache.spark.sql.types.StructType readPartitionSchema, org.apache.spark.sql.util.CaseInsensitiveStringMap options, scala.collection.immutable.Seq<org.apache.spark.sql.catalyst.expressions.Expression> partitionFilters, scala.collection.immutable.Seq<org.apache.spark.sql.catalyst.expressions.Expression> dataFilters)
Method Summary
Modifier and Type / Method / Description
abstract static R — apply(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8)
org.apache.spark.sql.connector.read.PartitionReaderFactory — createReaderFactory()
scala.collection.immutable.Seq<org.apache.spark.sql.catalyst.expressions.Expression> — dataFilters()
org.apache.spark.sql.types.StructType — dataSchema()
org.apache.spark.sql.execution.datasources.PartitioningAwareFileIndex — fileIndex()
int — maxMetadataValueLength()
org.apache.spark.sql.util.CaseInsensitiveStringMap — options()
scala.collection.immutable.Seq<org.apache.spark.sql.catalyst.expressions.Expression> — partitionFilters()
org.apache.spark.sql.types.StructType — readDataSchema()
org.apache.spark.sql.types.StructType — readPartitionSchema()
org.apache.spark.sql.SparkSession — sparkSession()
static String — toString()
Methods inherited from class java.lang.Object
equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
Methods inherited from interface scala.Equals
canEqual, equals
Methods inherited from interface org.apache.spark.sql.execution.datasources.v2.FileScan
description, equals, equivalentFilters, estimateStatistics, getFileUnSplittableReason, getMetaData, hashCode, isSplitable, org$apache$spark$sql$execution$datasources$v2$FileScan$_setter_$maxMetadataValueLength_$eq, org$apache$spark$sql$execution$datasources$v2$FileScan$_setter_$org$apache$spark$sql$execution$datasources$v2$FileScan$$isCaseSensitive_$eq, org$apache$spark$sql$execution$datasources$v2$FileScan$$isCaseSensitive, org$apache$spark$sql$execution$datasources$v2$FileScan$$normalizedDataFilters, org$apache$spark$sql$execution$datasources$v2$FileScan$$normalizedPartitionFilters, partitions, planInputPartitions, readSchema, seqToString, toBatch
Methods inherited from interface org.apache.spark.internal.Logging
initializeForcefully, initializeLogIfNecessary, initializeLogIfNecessary, initializeLogIfNecessary$default$2, isTraceEnabled, log, logDebug, logDebug, logDebug, logDebug, logError, logError, logError, logError, logInfo, logInfo, logInfo, logInfo, logName, LogStringContext, logTrace, logTrace, logTrace, logTrace, logWarning, logWarning, logWarning, logWarning, org$apache$spark$internal$Logging$$log_, org$apache$spark$internal$Logging$$log__$eq, withLogContext
Methods inherited from interface scala.Product
productArity, productElement, productElementName, productElementNames, productIterator, productPrefix
Methods inherited from interface org.apache.spark.sql.connector.read.Scan
columnarSupportMode, reportDriverMetrics, supportedCustomMetrics, toContinuousStream, toMicroBatchStream
-
Constructor Details
-
RawScan
public RawScan(org.apache.spark.sql.SparkSession sparkSession, org.apache.spark.sql.execution.datasources.PartitioningAwareFileIndex fileIndex, org.apache.spark.sql.types.StructType dataSchema, org.apache.spark.sql.types.StructType readDataSchema, org.apache.spark.sql.types.StructType readPartitionSchema, org.apache.spark.sql.util.CaseInsensitiveStringMap options, scala.collection.immutable.Seq<org.apache.spark.sql.catalyst.expressions.Expression> partitionFilters, scala.collection.immutable.Seq<org.apache.spark.sql.catalyst.expressions.Expression> dataFilters)
-
-
Method Details
-
apply
public abstract static R apply(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8)
toString
-
maxMetadataValueLength
public int maxMetadataValueLength()
- Specified by:
maxMetadataValueLength in interface org.apache.spark.sql.execution.datasources.v2.FileScan
-
sparkSession
public org.apache.spark.sql.SparkSession sparkSession()
- Specified by:
sparkSession in interface org.apache.spark.sql.execution.datasources.v2.FileScan
-
fileIndex
public org.apache.spark.sql.execution.datasources.PartitioningAwareFileIndex fileIndex()
- Specified by:
fileIndex in interface org.apache.spark.sql.execution.datasources.v2.FileScan
-
dataSchema
public org.apache.spark.sql.types.StructType dataSchema()
- Specified by:
dataSchema in interface org.apache.spark.sql.execution.datasources.v2.FileScan
-
readDataSchema
public org.apache.spark.sql.types.StructType readDataSchema()
- Specified by:
readDataSchema in interface org.apache.spark.sql.execution.datasources.v2.FileScan
-
readPartitionSchema
public org.apache.spark.sql.types.StructType readPartitionSchema()
- Specified by:
readPartitionSchema in interface org.apache.spark.sql.execution.datasources.v2.FileScan
-
options
public org.apache.spark.sql.util.CaseInsensitiveStringMap options() -
partitionFilters
public scala.collection.immutable.Seq<org.apache.spark.sql.catalyst.expressions.Expression> partitionFilters()
- Specified by:
partitionFilters in interface org.apache.spark.sql.execution.datasources.v2.FileScan
-
dataFilters
public scala.collection.immutable.Seq<org.apache.spark.sql.catalyst.expressions.Expression> dataFilters()
- Specified by:
dataFilters in interface org.apache.spark.sql.execution.datasources.v2.FileScan
-
createReaderFactory
public org.apache.spark.sql.connector.read.PartitionReaderFactory createReaderFactory()
- Specified by:
createReaderFactory in interface org.apache.spark.sql.connector.read.Batch
-