Modifier and Type | Method and Description |
---|---
InputRow | BenchmarkDataGenerator.nextRow()
Modifier and Type | Class and Description |
---|---
class | MapBasedInputRow
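
A minimal sketch of constructing a MapBasedInputRow by hand and reading it back through the InputRow interface; the column names and values are illustrative only:

```java
import io.druid.data.input.InputRow;
import io.druid.data.input.MapBasedInputRow;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class MapBasedInputRowSketch
{
  public static void main(String[] args)
  {
    // Illustrative event: a timestamp plus two dimension columns.
    Map<String, Object> event = new HashMap<>();
    event.put("page", "Main_Page");
    event.put("country", "NZ");

    InputRow row = new MapBasedInputRow(
        System.currentTimeMillis(),           // event timestamp, millis since epoch
        Arrays.asList("page", "country"),     // dimension names, in order
        event
    );

    System.out.println(row.getTimestampFromEpoch());
    System.out.println(row.getDimension("page")); // -> [Main_Page]
  }
}
```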
Modifier and Type | Method and Description |
---|---
InputRow | FirehoseV2.currRow()
InputRow | Firehose.nextRow() The next row available.
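
A sketch of the usual consumption loop around Firehose.hasMore()/nextRow(); the process method and the null check are illustrative assumptions:

```java
import io.druid.data.input.Firehose;
import io.druid.data.input.InputRow;

public class FirehoseLoopSketch
{
  static void consume(Firehose firehose) throws Exception
  {
    // Firehose is Closeable; drain it row by row.
    try (Firehose f = firehose) {
      while (f.hasMore()) {
        InputRow row = f.nextRow(); // the next row available
        if (row != null) {          // some firehoses may yield null rows
          process(row);
        }
      }
      f.commit().run(); // commit() hands back a Runnable that persists progress
    }
  }

  static void process(InputRow row)
  {
    System.out.println(row.getDimensions());
  }
}
```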
Modifier and Type | Method and Description |
---|---
List<InputRow> | AvroStreamInputRowParser.parseBatch(ByteBuffer input)
List<InputRow> | AvroHadoopInputRowParser.parseBatch(org.apache.avro.generic.GenericRecord record)
Modifier and Type | Method and Description |
---|---
static List<Object> | Rows.toGroupKey(long timeStamp, InputRow inputRow)
Modifier and Type | Method and Description |
---|---
static List<InputRow> | AvroParsers.parseGenericRecord(org.apache.avro.generic.GenericRecord record, ParseSpec parseSpec, ObjectFlattener<org.apache.avro.generic.GenericRecord> avroFlattener)
Modifier and Type | Method and Description |
---|---
InputRow | FileIteratingFirehose.nextRow()
InputRow | StringInputRowParser.parse(String input)
default InputRow | InputRowParser.parse(T input) Deprecated.
Modifier and Type | Method and Description |
---|---
List<InputRow> | StringInputRowParser.parseBatch(ByteBuffer input)
List<InputRow> | NoopInputRowParser.parseBatch(InputRow input)
List<InputRow> | MapInputRowParser.parseBatch(Map<String,Object> theMap)
default List<InputRow> | InputRowParser.parseBatch(T input) Parses an input into a list of InputRow.
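
The parseBatch variants above supersede the deprecated single-row parse(T input). A minimal sketch of driving an already-configured StringInputRowParser; building the ParseSpec is elided, since it depends on the input format:

```java
import io.druid.data.input.InputRow;
import io.druid.data.input.impl.StringInputRowParser;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.List;

public class ParseBatchSketch
{
  // Assumes the caller has already built a parser with a suitable ParseSpec.
  static List<InputRow> parseJsonLine(StringInputRowParser parser, String jsonLine)
  {
    ByteBuffer buf = ByteBuffer.wrap(jsonLine.getBytes(StandardCharsets.UTF_8));
    return parser.parseBatch(buf); // one input may yield zero or more rows
  }
}
```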
Modifier and Type | Method and Description |
---|---
List<InputRow> | NoopInputRowParser.parseBatch(InputRow input)
Modifier and Type | Method and Description |
---|---
List<InputRow> | OrcHadoopInputRowParser.parseBatch(org.apache.hadoop.hive.ql.io.orc.OrcStruct input)
Modifier and Type | Method and Description |
---|---
List<InputRow> | ParquetHadoopInputRowParser.parseBatch(org.apache.avro.generic.GenericRecord record) Imitates the Avro extension's AvroParsers.parseGenericRecord(GenericRecord, ParseSpec, ObjectFlattener).
Modifier and Type | Method and Description |
---|---
List<InputRow> | ProtobufInputRowParser.parseBatch(ByteBuffer input)
Modifier and Type | Method and Description |
---|---
List<InputRow> | ThriftInputRowParser.parseBatch(Object input)
Modifier and Type | Method and Description |
---|---
static InputRow | InputRowSerde.fromBytes(Map<String,InputRowSerde.IndexSerdeTypeHelper> typeHelperMap, byte[] data, AggregatorFactory[] aggs)
Modifier and Type | Method and Description |
---|---
List<InputRow> | HadoopyStringInputRowParser.parseBatch(Object input)
Modifier and Type | Method and Description |
---|---
com.google.common.base.Optional<Bucket> | HadoopDruidIndexerConfig.getBucket(InputRow inputRow) Get the proper bucket for some input row.
protected void | IndexGeneratorJob.IndexGeneratorMapper.innerMap(InputRow inputRow, org.apache.hadoop.mapreduce.Mapper.Context context, boolean reportParseExceptions)
protected void | DeterminePartitionsJob.DeterminePartitionsGroupByMapper.innerMap(InputRow inputRow, org.apache.hadoop.mapreduce.Mapper.Context context, boolean reportParseExceptions)
protected void | DeterminePartitionsJob.DeterminePartitionsDimSelectionAssumeGroupedMapper.innerMap(InputRow inputRow, org.apache.hadoop.mapreduce.Mapper.Context context, boolean reportParseExceptions)
protected abstract void | HadoopDruidIndexerMapper.innerMap(InputRow inputRow, org.apache.hadoop.mapreduce.Mapper.Context context, boolean reportParseExceptions)
protected void | DetermineHashedPartitionsJob.DetermineCardinalityMapper.innerMap(InputRow inputRow, org.apache.hadoop.mapreduce.Mapper.Context context, boolean reportParseExceptions)
static byte[] | InputRowSerde.toBytes(Map<String,InputRowSerde.IndexSerdeTypeHelper> typeHelperMap, InputRow row, AggregatorFactory[] aggs, boolean reportParseExceptions)
Modifier and Type | Class and Description |
---|---
class | SegmentInputRow Serves as a marker that these InputRow instances have already been combined, and that they contain the columns as they appear in the segment after ingestion, not as they would appear in the raw data.
Modifier and Type | Method and Description |
---|---
InputRow | DatasourceRecordReader.getCurrentValue()
Modifier and Type | Method and Description |
---|---
org.apache.hadoop.mapreduce.RecordReader<org.apache.hadoop.io.NullWritable,InputRow> | DatasourceInputFormat.createRecordReader(org.apache.hadoop.mapreduce.InputSplit split, org.apache.hadoop.mapreduce.TaskAttemptContext context)
Modifier and Type | Method and Description |
---|---
SegmentIdentifier | ActionBasedSegmentAllocator.allocate(InputRow row, String sequenceName, String previousSegmentId, boolean skipSegmentLineageCheck)
Modifier and Type | Method and Description |
---|---
InputRow | SpatialDimensionRowTransformer.apply(InputRow row)
InputRow | IncrementalIndex.formatRow(InputRow row)
Modifier and Type | Method and Description |
---|---
int | IncrementalIndex.add(InputRow row) Adds a new row.
int | IncrementalIndex.add(InputRow row, boolean skipMaxRowsInMemoryCheck)
protected Integer | OnheapIncrementalIndex.addToFacts(AggregatorFactory[] metrics, boolean deserializeComplexMetrics, boolean reportParseExceptions, InputRow row, AtomicInteger numEntries, IncrementalIndex.TimeAndDims key, ThreadLocal<InputRow> rowContainer, com.google.common.base.Supplier<InputRow> rowSupplier, boolean skipMaxRowsInMemoryCheck)
protected Integer | OffheapIncrementalIndex.addToFacts(AggregatorFactory[] metrics, boolean deserializeComplexMetrics, boolean reportParseExceptions, InputRow row, AtomicInteger numEntries, IncrementalIndex.TimeAndDims key, ThreadLocal<InputRow> rowContainer, com.google.common.base.Supplier<InputRow> rowSupplier, boolean skipMaxRowsInMemoryCheck)
protected abstract Integer | IncrementalIndex.addToFacts(AggregatorFactory[] metrics, boolean deserializeComplexMetrics, boolean reportParseExceptions, InputRow row, AtomicInteger numEntries, IncrementalIndex.TimeAndDims key, ThreadLocal<InputRow> rowContainer, com.google.common.base.Supplier<InputRow> rowSupplier, boolean skipMaxRowsInMemoryCheck)
InputRow | SpatialDimensionRowTransformer.apply(InputRow row)
InputRow | IncrementalIndex.formatRow(InputRow row)
Modifier and Type | Method and Description |
---|---
protected Integer | OnheapIncrementalIndex.addToFacts(AggregatorFactory[] metrics, boolean deserializeComplexMetrics, boolean reportParseExceptions, InputRow row, AtomicInteger numEntries, IncrementalIndex.TimeAndDims key, ThreadLocal<InputRow> rowContainer, com.google.common.base.Supplier<InputRow> rowSupplier, boolean skipMaxRowsInMemoryCheck)
protected Integer | OffheapIncrementalIndex.addToFacts(AggregatorFactory[] metrics, boolean deserializeComplexMetrics, boolean reportParseExceptions, InputRow row, AtomicInteger numEntries, IncrementalIndex.TimeAndDims key, ThreadLocal<InputRow> rowContainer, com.google.common.base.Supplier<InputRow> rowSupplier, boolean skipMaxRowsInMemoryCheck)
protected abstract Integer | IncrementalIndex.addToFacts(AggregatorFactory[] metrics, boolean deserializeComplexMetrics, boolean reportParseExceptions, InputRow row, AtomicInteger numEntries, IncrementalIndex.TimeAndDims key, ThreadLocal<InputRow> rowContainer, com.google.common.base.Supplier<InputRow> rowSupplier, boolean skipMaxRowsInMemoryCheck)
protected Aggregator[] | OnheapIncrementalIndex.initAggs(AggregatorFactory[] metrics, com.google.common.base.Supplier<InputRow> rowSupplier, boolean deserializeComplexMetrics, boolean concurrentEventAdd)
protected BufferAggregator[] | OffheapIncrementalIndex.initAggs(AggregatorFactory[] metrics, com.google.common.base.Supplier<InputRow> rowSupplier, boolean deserializeComplexMetrics, boolean concurrentEventAdd)
protected abstract AggregatorType[] | IncrementalIndex.initAggs(AggregatorFactory[] metrics, com.google.common.base.Supplier<InputRow> rowSupplier, boolean deserializeComplexMetrics, boolean concurrentEventAdd)
protected ColumnSelectorFactory | IncrementalIndex.makeColumnSelectorFactory(AggregatorFactory agg, com.google.common.base.Supplier<InputRow> in, boolean deserializeComplexMetrics)
static ColumnSelectorFactory | IncrementalIndex.makeColumnSelectorFactory(VirtualColumns virtualColumns, AggregatorFactory agg, com.google.common.base.Supplier<InputRow> in, boolean deserializeComplexMetrics) Column selector used at ingestion time for inputs to aggregators.
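
As a usage sketch of add(InputRow) above: the Builder chain below reflects this era's test helpers, so treat setSimpleTestingIndexSchema and buildOnheap as assumptions rather than the canonical construction path.

```java
import io.druid.data.input.InputRow;
import io.druid.data.input.MapBasedInputRow;
import io.druid.query.aggregation.CountAggregatorFactory;
import io.druid.segment.incremental.IncrementalIndex;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class IncrementalIndexSketch
{
  public static void main(String[] args) throws Exception
  {
    // Assumed builder API: a single count metric, on-heap storage.
    IncrementalIndex index = new IncrementalIndex.Builder()
        .setSimpleTestingIndexSchema(new CountAggregatorFactory("count"))
        .setMaxRowCount(10_000)
        .buildOnheap();

    Map<String, Object> event = new HashMap<>();
    event.put("dim", "a");
    InputRow row = new MapBasedInputRow(System.currentTimeMillis(), Arrays.asList("dim"), event);

    int rowCount = index.add(row); // returns the updated row count
    System.out.println(rowCount);
  }
}
```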
Modifier and Type | Method and Description |
---|---
AppenderatorDriverAddResult | BatchAppenderatorDriver.add(InputRow row, String sequenceName) Add a row.
AppenderatorDriverAddResult | StreamAppenderatorDriver.add(InputRow row, String sequenceName, com.google.common.base.Supplier<Committer> committerSupplier, boolean skipSegmentLineageCheck, boolean allowIncrementalPersists) Add a row.
int | AppenderatorPlumber.add(InputRow row, com.google.common.base.Supplier<Committer> committerSupplier)
default Appenderator.AppenderatorAddResult | Appenderator.add(SegmentIdentifier identifier, InputRow row, com.google.common.base.Supplier<Committer> committerSupplier) Same as Appenderator.add(SegmentIdentifier, InputRow, Supplier, boolean), with allowIncrementalPersists set to true.
Appenderator.AppenderatorAddResult | AppenderatorImpl.add(SegmentIdentifier identifier, InputRow row, com.google.common.base.Supplier<Committer> committerSupplier, boolean allowIncrementalPersists)
Appenderator.AppenderatorAddResult | Appenderator.add(SegmentIdentifier identifier, InputRow row, com.google.common.base.Supplier<Committer> committerSupplier, boolean allowIncrementalPersists) Add a row.
SegmentIdentifier | SegmentAllocator.allocate(InputRow row, String sequenceName, String previousSegmentId, boolean skipSegmentLineageCheck) Allocates a new segment for a given timestamp.
protected AppenderatorDriverAddResult | BaseAppenderatorDriver.append(InputRow row, String sequenceName, com.google.common.base.Supplier<Committer> committerSupplier, boolean skipSegmentLineageCheck, boolean allowIncrementalPersists) Add a row.
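
A heavily hedged sketch of the BatchAppenderatorDriver.add(InputRow, String) loop above; the already-started driver, the sequence name, and the isOk() check on AppenderatorDriverAddResult are assumptions about the surrounding setup.

```java
import io.druid.data.input.InputRow;
import io.druid.segment.realtime.appenderator.AppenderatorDriverAddResult;
import io.druid.segment.realtime.appenderator.BatchAppenderatorDriver;

public class DriverAddSketch
{
  // Assumes a driver whose job has already been started by the caller.
  static void addAll(BatchAppenderatorDriver driver, Iterable<InputRow> rows, String sequenceName)
      throws Exception
  {
    for (InputRow row : rows) {
      AppenderatorDriverAddResult result = driver.add(row, sequenceName);
      if (!result.isOk()) { // assumed accessor: reports whether a segment was allocated
        throw new IllegalStateException("could not allocate a segment for row: " + row);
      }
    }
  }
}
```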
Modifier and Type | Method and Description |
---|---
InputRow | IrcDecoder.decodeMessage(org.joda.time.DateTime timestamp, String channel, String msg)
InputRow | PredicateFirehose.nextRow()
InputRow | IngestSegmentFirehose.nextRow()
InputRow | EventReceiverFirehoseFactory.EventReceiverFirehose.nextRow()
Modifier and Type | Method and Description |
---|---
List<InputRow> | IrcInputRowParser.parseBatch(Pair<org.joda.time.DateTime,com.ircclouds.irc.api.domain.messages.ChannelPrivMsg> msg)
Modifier and Type | Method and Description |
---|---
void | EventReceiverFirehoseFactory.EventReceiverFirehose.addRows(Iterable<InputRow> rows)
Constructor and Description |
---
PredicateFirehose(Firehose firehose, com.google.common.base.Predicate<InputRow> predicate)
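
PredicateFirehose wraps a delegate Firehose and exposes only the rows matching the supplied predicate. A minimal sketch; the delegate firehose and the cutoff timestamp are assumed to come from elsewhere:

```java
import com.google.common.base.Predicate;
import io.druid.data.input.Firehose;
import io.druid.data.input.InputRow;
import io.druid.segment.realtime.firehose.PredicateFirehose;

public class PredicateFirehoseSketch
{
  static Firehose dropOldRows(Firehose delegate, long minTimestampMillis)
  {
    // Guava's Predicate is a single-method interface, so a lambda works here.
    Predicate<InputRow> notTooOld = row -> row.getTimestampFromEpoch() >= minTimestampMillis;
    return new PredicateFirehose(delegate, notTooOld);
  }
}
```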
Modifier and Type | Method and Description |
---|---
int | Sink.add(InputRow row, boolean skipMaxRowsInMemoryCheck)
int | RealtimePlumber.add(InputRow row, com.google.common.base.Supplier<Committer> committerSupplier)
int | Plumber.add(InputRow row, com.google.common.base.Supplier<Committer> committerSupplier)
Modifier and Type | Method and Description |
---|---
Object | ComplexMetricExtractor.extractValue(InputRow inputRow, String metricName)
Modifier and Type | Class and Description |
---|---
static class | Transformer.TransformedInputRow
Modifier and Type | Method and Description |
---|---
InputRow | TransformingStringInputRowParser.parse(String input)
InputRow | Transformer.transform(InputRow row) Transforms an input row, or returns null if the row should be filtered out.
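
transform(InputRow) returns null for rows that should be filtered out, so callers have to skip null results. A sketch of that pattern; obtaining the Transformer (typically from a TransformSpec) is left out:

```java
import io.druid.data.input.InputRow;
import io.druid.segment.transform.Transformer;

import java.util.ArrayList;
import java.util.List;

public class TransformSketch
{
  static List<InputRow> transformAll(Transformer transformer, Iterable<InputRow> rows)
  {
    List<InputRow> out = new ArrayList<>();
    for (InputRow row : rows) {
      InputRow transformed = transformer.transform(row);
      if (transformed != null) { // null signals the row was filtered out
        out.add(transformed);
      }
    }
    return out;
  }
}
```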
Modifier and Type | Method and Description |
---|---
List<InputRow> | TransformingStringInputRowParser.parseBatch(ByteBuffer input)
List<InputRow> | TransformingInputRowParser.parseBatch(T row)
Modifier and Type | Method and Description |
---|---
InputRow | Transformer.transform(InputRow row) Transforms an input row, or returns null if the row should be filtered out.
Constructor and Description |
---
TransformedInputRow(InputRow row, Map<String,RowFunction> transforms)
Modifier and Type | Method and Description |
---|---
ShardSpec | ShardSpecLookup.getShardSpec(long timestamp, InputRow row)
protected int | HashBasedNumberedShardSpec.hash(long timestamp, InputRow inputRow)
boolean | ShardSpec.isInChunk(long timestamp, InputRow inputRow)
boolean | NoneShardSpec.isInChunk(long timestamp, InputRow inputRow)
boolean | SingleDimensionShardSpec.isInChunk(long timestamp, InputRow inputRow)
boolean | NumberedShardSpec.isInChunk(long timestamp, InputRow inputRow)
boolean | LinearShardSpec.isInChunk(long timestamp, InputRow inputRow)
boolean | HashBasedNumberedShardSpec.isInChunk(long timestamp, InputRow inputRow)
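
A sketch of routing a row with ShardSpecLookup.getShardSpec(long, InputRow) and checking it with isInChunk; the two LinearShardSpec instances are illustrative, and getLookup(List) is assumed to be the way the lookup is obtained from a ShardSpec:

```java
import io.druid.data.input.InputRow;
import io.druid.timeline.partition.LinearShardSpec;
import io.druid.timeline.partition.ShardSpec;
import io.druid.timeline.partition.ShardSpecLookup;

import java.util.Arrays;
import java.util.List;

public class ShardSpecSketch
{
  static ShardSpec route(long timestamp, InputRow row)
  {
    // Two linear shards; real specs come from the task's allocation logic.
    List<ShardSpec> shards = Arrays.asList(new LinearShardSpec(0), new LinearShardSpec(1));

    // Any spec in the set can produce the lookup for the whole set (assumed API).
    ShardSpecLookup lookup = shards.get(0).getLookup(shards);
    ShardSpec chosen = lookup.getShardSpec(timestamp, row);

    // Sanity check: the chosen spec must claim the row.
    assert chosen.isInChunk(timestamp, row);
    return chosen;
  }
}
```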
Copyright © 2011–2018. All rights reserved.