public class DatasourceInputFormat
extends org.apache.hadoop.mapreduce.InputFormat<org.apache.hadoop.io.NullWritable,io.druid.data.input.InputRow>
Modifier and Type | Field and Description
---|---
static String | CONF_DRUID_SCHEMA
static String | CONF_INPUT_SEGMENTS
static String | CONF_MAX_SPLIT_SIZE
Constructor and Description
---
DatasourceInputFormat()
Modifier and Type | Method and Description
---|---
org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.NullWritable,io.druid.data.input.InputRow> | createRecordReader(org.apache.hadoop.mapreduce.InputSplit split, org.apache.hadoop.mapreduce.TaskAttemptContext context)
List<org.apache.hadoop.mapreduce.InputSplit> | getSplits(org.apache.hadoop.mapreduce.JobContext context)
public static final String CONF_INPUT_SEGMENTS
public static final String CONF_DRUID_SCHEMA
public static final String CONF_MAX_SPLIT_SIZE
public List<org.apache.hadoop.mapreduce.InputSplit> getSplits(org.apache.hadoop.mapreduce.JobContext context) throws IOException, InterruptedException
Specified by:
getSplits in class org.apache.hadoop.mapreduce.InputFormat<org.apache.hadoop.io.NullWritable,io.druid.data.input.InputRow>
Throws:
IOException
InterruptedException
public org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.NullWritable,io.druid.data.input.InputRow> createRecordReader(org.apache.hadoop.mapreduce.InputSplit split, org.apache.hadoop.mapreduce.TaskAttemptContext context) throws IOException, InterruptedException
Specified by:
createRecordReader in class org.apache.hadoop.mapreduce.InputFormat<org.apache.hadoop.io.NullWritable,io.druid.data.input.InputRow>
Throws:
IOException
InterruptedException
Copyright © 2011–2015. All rights reserved.