Modifier and Type | Method and Description |
---|---|
DataSegment |
SegmentLoadInfo.getSegment() |
DataSegment |
ImmutableSegmentLoadInfo.getSegment() |
DataSegment |
ImmutableDruidServer.getSegment(String segmentName) |
DataSegment |
DruidServer.getSegment(String segmentName) |
Modifier and Type | Method and Description |
---|---|
Map<String,DataSegment> |
ImmutableDruidDataSource.getPartitionNames() |
Map<String,DataSegment> |
ImmutableDruidServer.getSegments() |
Set<DataSegment> |
ImmutableDruidDataSource.getSegments() |
Map<String,DataSegment> |
DruidServer.getSegments() |
Set<DataSegment> |
DruidDataSource.getSegments() |
Modifier and Type | Method and Description |
---|---|
DruidServer |
DruidServer.addDataSegment(String segmentId,
DataSegment segment) |
protected DruidServer |
SingleServerInventoryView.addInnerInventory(DruidServer container,
String inventoryKey,
DataSegment inventory) |
DruidDataSource |
DruidDataSource.addSegment(String partitionName,
DataSegment dataSegment) |
protected void |
ServerInventoryView.addSingleInventory(DruidServer container,
DataSegment inventory) |
ServerView.CallbackAction |
ServerView.SegmentCallback.segmentAdded(DruidServerMetadata server,
DataSegment segment)
Called when a segment is added to a server.
|
ServerView.CallbackAction |
ServerView.BaseSegmentCallback.segmentAdded(DruidServerMetadata server,
DataSegment segment) |
ServerView.CallbackAction |
ServerView.SegmentCallback.segmentRemoved(DruidServerMetadata server,
DataSegment segment)
Called when a segment is removed from a server.
|
ServerView.CallbackAction |
ServerView.BaseSegmentCallback.segmentRemoved(DruidServerMetadata server,
DataSegment segment) |
protected DruidServer |
SingleServerInventoryView.updateInnerInventory(DruidServer container,
String inventoryKey,
DataSegment inventory) |
Modifier and Type | Method and Description |
---|---|
protected DruidServer |
BatchServerInventoryView.addInnerInventory(DruidServer container,
String inventoryKey,
Set<DataSegment> inventory) |
DruidDataSource |
DruidDataSource.addSegments(Map<String,DataSegment> partitionMap) |
void |
SingleServerInventoryView.registerSegmentCallback(Executor exec,
ServerView.SegmentCallback callback,
com.google.common.base.Predicate<Pair<DruidServerMetadata,DataSegment>> filter) |
void |
FilteredServerInventoryView.registerSegmentCallback(Executor exec,
ServerView.SegmentCallback callback,
com.google.common.base.Predicate<Pair<DruidServerMetadata,DataSegment>> filter) |
void |
BatchServerInventoryView.registerSegmentCallback(Executor exec,
ServerView.SegmentCallback callback,
com.google.common.base.Predicate<Pair<DruidServerMetadata,DataSegment>> filter) |
protected DruidServer |
BatchServerInventoryView.updateInnerInventory(DruidServer container,
String inventoryKey,
Set<DataSegment> inventory) |
Constructor and Description |
---|
ImmutableSegmentLoadInfo(DataSegment segment,
Set<DruidServerMetadata> servers) |
SegmentLoadInfo(DataSegment segment) |
Constructor and Description |
---|
BatchServerInventoryView(ZkPathsConfig zkPaths,
org.apache.curator.framework.CuratorFramework curator,
com.fasterxml.jackson.databind.ObjectMapper jsonMapper,
com.google.common.base.Predicate<Pair<DruidServerMetadata,DataSegment>> defaultFilter) |
ImmutableDruidDataSource(String name,
com.google.common.collect.ImmutableMap<String,String> properties,
com.google.common.collect.ImmutableMap<String,DataSegment> partitionNames,
com.google.common.collect.ImmutableSet<DataSegment> segmentsHolder) |
ImmutableDruidDataSource(String name,
com.google.common.collect.ImmutableMap<String,String> properties,
com.google.common.collect.ImmutableMap<String,DataSegment> partitionNames,
com.google.common.collect.ImmutableSet<DataSegment> segmentsHolder) |
ImmutableDruidServer(DruidServerMetadata metadata,
long currSize,
com.google.common.collect.ImmutableMap<String,ImmutableDruidDataSource> dataSources,
com.google.common.collect.ImmutableMap<String,DataSegment> segments) |
SingleServerInventoryView(ZkPathsConfig zkPaths,
org.apache.curator.framework.CuratorFramework curator,
com.fasterxml.jackson.databind.ObjectMapper jsonMapper,
com.google.common.base.Predicate<Pair<DruidServerMetadata,DataSegment>> defaultFilter) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
ClientConversionQuery.getSegment() |
Modifier and Type | Method and Description |
---|---|
List<DataSegment> |
ClientMergeQuery.getSegments() |
List<DataSegment> |
ClientAppendQuery.getSegments() |
Modifier and Type | Method and Description |
---|---|
void |
IndexingServiceClient.upgradeSegment(DataSegment dataSegment) |
Modifier and Type | Method and Description |
---|---|
void |
IndexingServiceClient.mergeSegments(List<DataSegment> segments) |
Constructor and Description |
---|
ClientConversionQuery(DataSegment segment) |
Constructor and Description |
---|
ClientAppendQuery(String dataSource,
List<DataSegment> segments) |
ClientMergeQuery(String dataSource,
List<DataSegment> segments,
List<AggregatorFactory> aggregators) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
ServerSelector.getSegment() |
Modifier and Type | Method and Description |
---|---|
void |
ServerSelector.addServerAndUpdateSegment(QueryableDruidServer server,
DataSegment segment) |
QueryableDruidServer |
ServerSelectorStrategy.pick(Set<QueryableDruidServer> servers,
DataSegment segment) |
QueryableDruidServer |
RandomServerSelectorStrategy.pick(Set<QueryableDruidServer> servers,
DataSegment segment) |
QueryableDruidServer |
ConnectionCountServerSelectorStrategy.pick(Set<QueryableDruidServer> servers,
DataSegment segment) |
QueryableDruidServer |
TierSelectorStrategy.pick(TreeMap<Integer,Set<QueryableDruidServer>> prioritizedServers,
DataSegment segment) |
QueryableDruidServer |
AbstractTierSelectorStrategy.pick(TreeMap<Integer,Set<QueryableDruidServer>> prioritizedServers,
DataSegment segment) |
Constructor and Description |
---|
ServerSelector(DataSegment segment,
TierSelectorStrategy strategy) |
Modifier and Type | Method and Description |
---|---|
static DataSegment |
JobHelper.serializeOutIndex(DataSegment segmentTemplate,
org.apache.hadoop.conf.Configuration configuration,
org.apache.hadoop.util.Progressable progressable,
File mergedBase,
org.apache.hadoop.fs.Path finalIndexZipFilePath,
org.apache.hadoop.fs.Path finalDescriptorPath,
org.apache.hadoop.fs.Path tmpPath) |
Modifier and Type | Method and Description |
---|---|
List<DataSegment> |
HadoopDruidIndexerJob.getPublishedSegments() |
static List<DataSegment> |
IndexGeneratorJob.getPublishedSegments(HadoopDruidIndexerConfig config) |
Modifier and Type | Method and Description |
---|---|
static URI |
JobHelper.getURIFromSegment(DataSegment dataSegment) |
org.apache.hadoop.fs.Path |
HadoopDruidIndexerConfig.makeDescriptorInfoPath(DataSegment segment) |
static org.apache.hadoop.fs.Path |
JobHelper.makeFileNamePath(org.apache.hadoop.fs.Path basePath,
org.apache.hadoop.fs.FileSystem fs,
DataSegment segmentTemplate,
String baseFileName) |
static org.apache.hadoop.fs.Path |
JobHelper.makeTmpPath(org.apache.hadoop.fs.Path basePath,
org.apache.hadoop.fs.FileSystem fs,
DataSegment segmentTemplate,
org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID) |
static DataSegment |
JobHelper.serializeOutIndex(DataSegment segmentTemplate,
org.apache.hadoop.conf.Configuration configuration,
org.apache.hadoop.util.Progressable progressable,
File mergedBase,
org.apache.hadoop.fs.Path finalIndexZipFilePath,
org.apache.hadoop.fs.Path finalDescriptorPath,
org.apache.hadoop.fs.Path tmpPath) |
static void |
JobHelper.writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem outputFS,
DataSegment segment,
org.apache.hadoop.fs.Path descriptorPath,
org.apache.hadoop.util.Progressable progressable) |
Modifier and Type | Method and Description |
---|---|
void |
MetadataStorageUpdaterJobHandler.publishSegments(String tableName,
List<DataSegment> segments,
com.fasterxml.jackson.databind.ObjectMapper mapper) |
void |
SQLMetadataStorageUpdaterJobHandler.publishSegments(String tableName,
List<DataSegment> segments,
com.fasterxml.jackson.databind.ObjectMapper mapper) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
WindowedDataSegment.getSegment() |
Modifier and Type | Method and Description |
---|---|
List<DataSegment> |
DatasourceIngestionSpec.getSegments() |
Modifier and Type | Method and Description |
---|---|
static WindowedDataSegment |
WindowedDataSegment.of(DataSegment segment) |
Constructor and Description |
---|
WindowedDataSegment(DataSegment segment,
org.joda.time.Interval interval) |
Constructor and Description |
---|
DatasourceIngestionSpec(String dataSource,
org.joda.time.Interval interval,
List<org.joda.time.Interval> intervals,
List<DataSegment> segments,
DimFilter filter,
Granularity granularity,
List<String> dimensions,
List<String> metrics,
boolean ignoreWhenNoSegments) |
Modifier and Type | Method and Description |
---|---|
List<DataSegment> |
UsedSegmentLister.getUsedSegmentsForIntervals(String dataSource,
List<org.joda.time.Interval> intervals)
Get all segments which may include any data in the interval and are flagged as used.
|
List<DataSegment> |
MetadataStoreBasedUsedSegmentLister.getUsedSegmentsForIntervals(String dataSource,
List<org.joda.time.Interval> intervals) |
Modifier and Type | Method and Description |
---|---|
List<DataSegment> |
HadoopDruidConverterConfig.getSegments() |
List<DataSegment> |
HadoopConverterJob.run() |
Modifier and Type | Method and Description |
---|---|
static void |
HadoopConverterJob.converterConfigIntoConfiguration(HadoopDruidConverterConfig priorConfig,
List<DataSegment> segments,
org.apache.hadoop.conf.Configuration configuration) |
Constructor and Description |
---|
HadoopDruidConverterConfig(String dataSource,
org.joda.time.Interval interval,
IndexSpec indexSpec,
List<DataSegment> segments,
Boolean validate,
URI distributedSuccessCache,
Map<String,String> hadoopProperties,
String jobPriority,
String segmentOutputPath) |
Modifier and Type | Method and Description |
---|---|
Set<DataSegment> |
ActionBasedUsedSegmentChecker.findUsedSegments(Set<SegmentIdentifier> identifiers) |
Modifier and Type | Method and Description |
---|---|
Map<DataSegment,File> |
TaskToolbox.fetchSegments(List<DataSegment> segments) |
Modifier and Type | Method and Description |
---|---|
Map<DataSegment,File> |
TaskToolbox.fetchSegments(List<DataSegment> segments) |
void |
TaskToolbox.publishSegments(Iterable<DataSegment> segments) |
Modifier and Type | Method and Description |
---|---|
com.fasterxml.jackson.core.type.TypeReference<List<DataSegment>> |
SegmentListUsedAction.getReturnTypeReference() |
com.fasterxml.jackson.core.type.TypeReference<List<DataSegment>> |
SegmentListUnusedAction.getReturnTypeReference() |
com.fasterxml.jackson.core.type.TypeReference<Set<DataSegment>> |
SegmentInsertAction.getReturnTypeReference() |
Set<DataSegment> |
SegmentTransactionalInsertAction.getSegments() |
Set<DataSegment> |
SegmentNukeAction.getSegments() |
Set<DataSegment> |
SegmentMetadataUpdateAction.getSegments() |
Set<DataSegment> |
SegmentInsertAction.getSegments() |
List<DataSegment> |
SegmentListUsedAction.perform(Task task,
TaskActionToolbox toolbox) |
List<DataSegment> |
SegmentListUnusedAction.perform(Task task,
TaskActionToolbox toolbox) |
Set<DataSegment> |
SegmentInsertAction.perform(Task task,
TaskActionToolbox toolbox)
Behaves similarly to
IndexerMetadataStorageCoordinator.announceHistoricalSegments(Set, DataSourceMetadata, DataSourceMetadata),
with startMetadata and endMetadata both null. |
Modifier and Type | Method and Description |
---|---|
boolean |
TaskActionToolbox.taskLockCoversSegments(Task task,
Set<DataSegment> segments) |
void |
TaskActionToolbox.verifyTaskLocks(Task task,
Set<DataSegment> segments) |
Constructor and Description |
---|
SegmentInsertAction(Set<DataSegment> segments) |
SegmentMetadataUpdateAction(Set<DataSegment> segments) |
SegmentNukeAction(Set<DataSegment> segments) |
SegmentTransactionalInsertAction(Set<DataSegment> segments) |
SegmentTransactionalInsertAction(Set<DataSegment> segments,
DataSourceMetadata startMetadata,
DataSourceMetadata endMetadata) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
HadoopConverterTask.getSegment() |
DataSegment |
ConvertSegmentTask.getSegment() |
DataSegment |
ConvertSegmentTask.SubTask.getSegment() |
Modifier and Type | Method and Description |
---|---|
List<DataSegment> |
MergeTaskBase.getSegments() |
List<DataSegment> |
HadoopConverterTask.ConverterSubTask.getSegments() |
Modifier and Type | Method and Description |
---|---|
static ConvertSegmentTask |
ConvertSegmentTask.create(DataSegment segment,
IndexSpec indexSpec,
boolean force,
boolean validate,
Map<String,Object> context)
Create a task to update the segment specified to the most recent binary version with the specified indexSpec
|
void |
RealtimeIndexTask.TaskActionSegmentPublisher.publishSegment(DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
protected Iterable<Task> |
HadoopConverterTask.generateSubTasks(String groupId,
Iterable<DataSegment> segments,
IndexSpec indexSpec,
boolean force,
boolean validate,
Map<String,Object> context) |
protected Iterable<Task> |
ConvertSegmentTask.generateSubTasks(String groupId,
Iterable<DataSegment> segments,
IndexSpec indexSpec,
boolean force,
boolean validate,
Map<String,Object> context) |
protected abstract File |
MergeTaskBase.merge(TaskToolbox taskToolbox,
Map<DataSegment,File> segments,
File outDir) |
File |
MergeTask.merge(TaskToolbox toolbox,
Map<DataSegment,File> segments,
File outDir) |
File |
AppendTask.merge(TaskToolbox toolbox,
Map<DataSegment,File> segments,
File outDir) |
Constructor and Description |
---|
ConvertSegmentBackwardsCompatibleTask(String id,
String dataSource,
org.joda.time.Interval interval,
DataSegment segment,
IndexSpec indexSpec,
Boolean force,
Boolean validate)
Deprecated.
|
ConvertSegmentTask(String id,
String dataSource,
org.joda.time.Interval interval,
DataSegment segment,
IndexSpec indexSpec,
boolean force,
boolean validate,
Map<String,Object> context) |
SubTask(String groupId,
DataSegment segment,
IndexSpec indexSpec,
Boolean force,
Boolean validate)
Deprecated.
|
SubTask(String groupId,
DataSegment segment,
IndexSpec indexSpec,
Boolean force,
Boolean validate,
Map<String,Object> context) |
Constructor and Description |
---|
AppendTask(String id,
String dataSource,
List<DataSegment> segments,
List<AggregatorFactory> aggregators,
IndexSpec indexSpec,
Boolean buildV9Directly,
Map<String,Object> context) |
ConverterSubTask(List<DataSegment> segments,
HadoopConverterTask parent,
Map<String,Object> context) |
MergeTask(String id,
String dataSource,
List<DataSegment> segments,
List<AggregatorFactory> aggregators,
Boolean rollup,
IndexSpec indexSpec,
Boolean buildV9Directly,
Map<String,Object> context) |
MergeTaskBase(String id,
String dataSource,
List<DataSegment> segments,
Map<String,Object> context) |
Modifier and Type | Method and Description |
---|---|
List<DataSegment> |
OverlordActionBasedUsedSegmentLister.getUsedSegmentsForIntervals(String dataSource,
List<org.joda.time.Interval> intervals) |
Modifier and Type | Method and Description |
---|---|
Set<DataSegment> |
IndexerMetadataStorageCoordinator.announceHistoricalSegments(Set<DataSegment> segments)
Attempts to insert a set of segments to the metadata storage.
|
Set<DataSegment> |
TaskStorageQueryAdapter.getInsertedSegments(String taskid)
Returns all segments created by this task.
|
Set<DataSegment> |
SegmentPublishResult.getSegments() |
List<DataSegment> |
IndexerMetadataStorageCoordinator.getUnusedSegmentsForInterval(String dataSource,
org.joda.time.Interval interval)
Get all segments which include ONLY data within the given interval and are not flagged as used.
|
List<DataSegment> |
IndexerMetadataStorageCoordinator.getUsedSegmentsForInterval(String dataSource,
org.joda.time.Interval interval)
Get all segments which may include any data in the interval and are flagged as used.
|
List<DataSegment> |
IndexerMetadataStorageCoordinator.getUsedSegmentsForIntervals(String dataSource,
List<org.joda.time.Interval> intervals)
Get all segments which may include any data in the interval and are flagged as used.
|
Modifier and Type | Method and Description |
---|---|
Set<DataSegment> |
IndexerMetadataStorageCoordinator.announceHistoricalSegments(Set<DataSegment> segments)
Attempts to insert a set of segments to the metadata storage.
|
SegmentPublishResult |
IndexerMetadataStorageCoordinator.announceHistoricalSegments(Set<DataSegment> segments,
DataSourceMetadata startMetadata,
DataSourceMetadata endMetadata)
Attempts to insert a set of segments to the metadata storage.
|
void |
IndexerMetadataStorageCoordinator.deleteSegments(Set<DataSegment> segments) |
void |
IndexerMetadataStorageCoordinator.updateSegmentMetadata(Set<DataSegment> segments) |
Constructor and Description |
---|
SegmentPublishResult(Set<DataSegment> segments,
boolean success) |
Modifier and Type | Method and Description |
---|---|
Set<DataSegment> |
IndexerSQLMetadataStorageCoordinator.announceHistoricalSegments(Set<DataSegment> segments)
Attempts to insert a set of segments to the database.
|
List<DataSegment> |
IndexerSQLMetadataStorageCoordinator.getUnusedSegmentsForInterval(String dataSource,
org.joda.time.Interval interval) |
List<DataSegment> |
IndexerSQLMetadataStorageCoordinator.getUsedSegmentsForInterval(String dataSource,
org.joda.time.Interval interval) |
List<DataSegment> |
IndexerSQLMetadataStorageCoordinator.getUsedSegmentsForIntervals(String dataSource,
List<org.joda.time.Interval> intervals) |
Modifier and Type | Method and Description |
---|---|
void |
SQLMetadataSegmentPublisher.publishSegment(DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
Set<DataSegment> |
IndexerSQLMetadataStorageCoordinator.announceHistoricalSegments(Set<DataSegment> segments)
Attempts to insert a set of segments to the database.
|
SegmentPublishResult |
IndexerSQLMetadataStorageCoordinator.announceHistoricalSegments(Set<DataSegment> segments,
DataSourceMetadata startMetadata,
DataSourceMetadata endMetadata)
Attempts to insert a set of segments to the metadata storage.
|
void |
IndexerSQLMetadataStorageCoordinator.deleteSegments(Set<DataSegment> segments) |
void |
IndexerSQLMetadataStorageCoordinator.updateSegmentMetadata(Set<DataSegment> segments) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
DataSegmentArchiver.archive(DataSegment segment)
Perform an archive task on the segment and return the resulting segment or null if there was no action needed.
|
DataSegment |
OmniDataSegmentArchiver.archive(DataSegment segment) |
DataSegment |
DataSegmentMover.move(DataSegment segment,
Map<String,Object> targetLoadSpec) |
DataSegment |
OmniDataSegmentMover.move(DataSegment segment,
Map<String,Object> targetLoadSpec) |
DataSegment |
DataSegmentPusher.push(File file,
DataSegment segment) |
DataSegment |
LocalDataSegmentPusher.push(File dataSegmentFile,
DataSegment segment) |
DataSegment |
DataSegmentArchiver.restore(DataSegment segment)
Perform the restore from an archived segment and return the resulting segment, or null if there was no action needed.
|
DataSegment |
OmniDataSegmentArchiver.restore(DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
Set<DataSegment> |
DataSegmentFinder.findSegments(String workingDirPath,
boolean updateDescriptor)
This method should first recursively look for descriptor.json (partitionNum_descriptor.json for HDFS data storage) underneath
workingDirPath and then verify that index.zip (partitionNum_index.zip for HDFS data storage) exists in the same folder.
|
Set<DataSegment> |
LocalDataSegmentFinder.findSegments(String workingDirPath,
boolean updateDescriptor) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
DataSegmentArchiver.archive(DataSegment segment)
Perform an archive task on the segment and return the resulting segment or null if there was no action needed.
|
DataSegment |
OmniDataSegmentArchiver.archive(DataSegment segment) |
void |
SegmentLoaderLocalCacheManager.cleanup(DataSegment segment) |
void |
SegmentLoader.cleanup(DataSegment segment) |
Segment |
SegmentizerFactory.factorize(DataSegment segment,
File parentDir) |
Segment |
MMappedQueryableSegmentizerFactory.factorize(DataSegment dataSegment,
File parentDir) |
io.druid.segment.loading.StorageLocation |
SegmentLoaderLocalCacheManager.findStorageLocationIfLoaded(DataSegment segment) |
static String |
DataSegmentPusherUtil.getHdfsStorageDir(DataSegment segment)
Due to https://issues.apache.org/jira/browse/HDFS-13, ":" is not allowed in
path names.
|
Segment |
SegmentLoaderLocalCacheManager.getSegment(DataSegment segment) |
Segment |
SegmentLoader.getSegment(DataSegment segment) |
File |
SegmentLoaderLocalCacheManager.getSegmentFiles(DataSegment segment) |
File |
SegmentLoader.getSegmentFiles(DataSegment segment) |
void |
DataSegmentPuller.getSegmentFiles(DataSegment segment,
File dir)
Pull down segment files for the given DataSegment and put them in the given directory.
|
void |
LocalDataSegmentPuller.getSegmentFiles(DataSegment segment,
File dir) |
static String |
DataSegmentPusherUtil.getStorageDir(DataSegment segment) |
boolean |
SegmentLoaderLocalCacheManager.isSegmentLoaded(DataSegment segment) |
boolean |
SegmentLoader.isSegmentLoaded(DataSegment segment) |
void |
DataSegmentKiller.kill(DataSegment segments) |
void |
OmniDataSegmentKiller.kill(DataSegment segment) |
void |
LocalDataSegmentKiller.kill(DataSegment segment) |
DataSegment |
DataSegmentMover.move(DataSegment segment,
Map<String,Object> targetLoadSpec) |
DataSegment |
OmniDataSegmentMover.move(DataSegment segment,
Map<String,Object> targetLoadSpec) |
DataSegment |
DataSegmentPusher.push(File file,
DataSegment segment) |
DataSegment |
LocalDataSegmentPusher.push(File dataSegmentFile,
DataSegment segment) |
DataSegment |
DataSegmentArchiver.restore(DataSegment segment)
Perform the restore from an archived segment and return the resulting segment or null if there was no action
|
DataSegment |
OmniDataSegmentArchiver.restore(DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
void |
SegmentPublisher.publishSegment(DataSegment segment) |
void |
NoopSegmentPublisher.publishSegment(DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
Set<DataSegment> |
UsedSegmentChecker.findUsedSegments(Set<SegmentIdentifier> identifiers)
For any identifiers that exist and are actually used, returns the corresponding DataSegment objects.
|
List<DataSegment> |
SegmentsAndMetadata.getSegments() |
Modifier and Type | Method and Description |
---|---|
static SegmentIdentifier |
SegmentIdentifier.fromDataSegment(DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
boolean |
TransactionalSegmentPublisher.publishSegments(Set<DataSegment> segments,
Object commitMetadata)
Publish segments, along with some commit metadata, in a single transaction.
|
Constructor and Description |
---|
SegmentsAndMetadata(List<DataSegment> segments,
Object commitMetadata) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
Sink.getSegment() |
Modifier and Type | Method and Description |
---|---|
DataSegment |
SegmentChangeRequestLoad.getSegment() |
DataSegment |
SegmentChangeRequestDrop.getSegment() |
Modifier and Type | Method and Description |
---|---|
Collection<DataSegment> |
ZkCoordinator.getPendingDeleteSnapshot() |
Modifier and Type | Method and Description |
---|---|
void |
ZkCoordinator.addSegment(DataSegment segment,
DataSegmentChangeCallback callback) |
void |
DataSegmentChangeHandler.addSegment(DataSegment segment,
DataSegmentChangeCallback callback) |
void |
DataSegmentAnnouncer.announceSegment(DataSegment segment) |
void |
BatchDataSegmentAnnouncer.announceSegment(DataSegment segment) |
void |
ServerManager.dropSegment(DataSegment segment) |
boolean |
DataSegmentAnnouncer.isAnnounced(DataSegment segment) |
boolean |
BatchDataSegmentAnnouncer.isAnnounced(DataSegment segment) |
boolean |
ServerManager.isSegmentCached(DataSegment segment) |
boolean |
ServerManager.loadSegment(DataSegment segment)
Load a single segment.
|
void |
ZkCoordinator.removeSegment(DataSegment segment,
DataSegmentChangeCallback callback) |
void |
DataSegmentChangeHandler.removeSegment(DataSegment segment,
DataSegmentChangeCallback callback) |
void |
DataSegmentAnnouncer.unannounceSegment(DataSegment segment) |
void |
BatchDataSegmentAnnouncer.unannounceSegment(DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
void |
DataSegmentAnnouncer.announceSegments(Iterable<DataSegment> segments) |
void |
BatchDataSegmentAnnouncer.announceSegments(Iterable<DataSegment> segments) |
void |
DataSegmentAnnouncer.unannounceSegments(Iterable<DataSegment> segments) |
void |
BatchDataSegmentAnnouncer.unannounceSegments(Iterable<DataSegment> segments) |
Constructor and Description |
---|
SegmentChangeRequestDrop(DataSegment segment) |
SegmentChangeRequestLoad(DataSegment segment) |
Modifier and Type | Field and Description |
---|---|
static Comparator<DataSegment> |
DruidCoordinator.SEGMENT_COMPARATOR |
Modifier and Type | Method and Description |
---|---|
DataSegment |
BalancerSegmentHolder.getSegment() |
Modifier and Type | Method and Description |
---|---|
Iterable<DataSegment> |
DruidCoordinator.getAvailableDataSegments() |
Set<DataSegment> |
DruidCoordinatorRuntimeParams.getAvailableSegments() |
Set<DataSegment> |
DruidCoordinator.getOrderedAvailableDataSegments() |
Set<DataSegment> |
LoadQueuePeon.getSegmentsToDrop() |
Set<DataSegment> |
LoadQueuePeon.getSegmentsToLoad() |
Modifier and Type | Method and Description |
---|---|
protected Pair<Double,ServerHolder> |
CostBalancerStrategy.chooseBestServer(DataSegment proposalSegment,
Iterable<ServerHolder> serverHolders,
boolean includeCurrentServer)
For assignment, we want to move to the lowest cost server that isn't already serving the segment.
|
protected double |
DiskNormalizedCostBalancerStrategy.computeCost(DataSegment proposalSegment,
ServerHolder server,
boolean includeCurrentServer)
Averages the cost obtained from CostBalancerStrategy.
|
protected double |
CostBalancerStrategy.computeCost(DataSegment proposalSegment,
ServerHolder server,
boolean includeCurrentServer) |
static double |
CostBalancerStrategy.computeJointSegmentsCost(DataSegment segmentA,
DataSegment segmentB)
This defines the unnormalized cost function between two segments.
|
void |
LoadQueuePeon.dropSegment(DataSegment segment,
LoadPeonCallback callback) |
ServerHolder |
RandomBalancerStrategy.findNewSegmentHomeBalancer(DataSegment proposalSegment,
List<ServerHolder> serverHolders) |
ServerHolder |
CostBalancerStrategy.findNewSegmentHomeBalancer(DataSegment proposalSegment,
List<ServerHolder> serverHolders) |
ServerHolder |
BalancerStrategy.findNewSegmentHomeBalancer(DataSegment proposalSegment,
List<ServerHolder> serverHolders) |
ServerHolder |
RandomBalancerStrategy.findNewSegmentHomeReplicator(DataSegment proposalSegment,
List<ServerHolder> serverHolders) |
ServerHolder |
CostBalancerStrategy.findNewSegmentHomeReplicator(DataSegment proposalSegment,
List<ServerHolder> serverHolders) |
ServerHolder |
BalancerStrategy.findNewSegmentHomeReplicator(DataSegment proposalSegment,
List<ServerHolder> serverHolders) |
boolean |
ServerHolder.isLoadingSegment(DataSegment segment) |
boolean |
ServerHolder.isServingSegment(DataSegment segment) |
void |
LoadQueuePeon.loadSegment(DataSegment segment,
LoadPeonCallback callback) |
void |
DruidCoordinator.removeSegment(DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
DruidCoordinatorRuntimeParams.Builder |
DruidCoordinatorRuntimeParams.Builder.withAvailableSegments(Collection<DataSegment> availableSegmentsCollection) |
Constructor and Description |
---|
BalancerSegmentHolder(ImmutableDruidServer fromServer,
DataSegment segment) |
Constructor and Description |
---|
DruidCoordinatorRuntimeParams(long startTime,
DruidCluster druidCluster,
MetadataRuleManager databaseRuleManager,
SegmentReplicantLookup segmentReplicantLookup,
Set<DruidDataSource> dataSources,
Set<DataSegment> availableSegments,
Map<String,LoadQueuePeon> loadManagementPeons,
ReplicationThrottler replicationManager,
com.metamx.emitter.service.ServiceEmitter emitter,
CoordinatorDynamicConfig coordinatorDynamicConfig,
CoordinatorStats stats,
org.joda.time.DateTime balancerReferenceTimestamp,
BalancerStrategy balancerStrategy) |
Modifier and Type | Method and Description |
---|---|
boolean |
Rule.appliesTo(DataSegment segment,
org.joda.time.DateTime referenceTimestamp) |
boolean |
PeriodLoadRule.appliesTo(DataSegment segment,
org.joda.time.DateTime referenceTimestamp) |
boolean |
PeriodDropRule.appliesTo(DataSegment segment,
org.joda.time.DateTime referenceTimestamp) |
boolean |
IntervalLoadRule.appliesTo(DataSegment segment,
org.joda.time.DateTime referenceTimestamp) |
boolean |
IntervalDropRule.appliesTo(DataSegment segment,
org.joda.time.DateTime referenceTimestamp) |
boolean |
ForeverLoadRule.appliesTo(DataSegment segment,
org.joda.time.DateTime referenceTimestamp) |
boolean |
ForeverDropRule.appliesTo(DataSegment segment,
org.joda.time.DateTime referenceTimestamp) |
CoordinatorStats |
Rule.run(DruidCoordinator coordinator,
DruidCoordinatorRuntimeParams params,
DataSegment segment) |
CoordinatorStats |
LoadRule.run(DruidCoordinator coordinator,
DruidCoordinatorRuntimeParams params,
DataSegment segment) |
CoordinatorStats |
DropRule.run(DruidCoordinator coordinator,
DruidCoordinatorRuntimeParams params,
DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
AzureDataSegmentPusher.push(File indexFilesDir,
DataSegment segment) |
DataSegment |
AzureDataSegmentPusher.uploadDataSegment(DataSegment segment,
int version,
long size,
File compressedSegmentData,
File descriptorFile,
Map<String,String> azurePaths) |
Modifier and Type | Method and Description |
---|---|
File |
AzureDataSegmentPusher.createSegmentDescriptorFile(com.fasterxml.jackson.databind.ObjectMapper jsonMapper,
DataSegment segment) |
Map<String,String> |
AzureDataSegmentPusher.getAzurePaths(DataSegment segment) |
void |
AzureDataSegmentPuller.getSegmentFiles(DataSegment segment,
File outDir) |
void |
AzureDataSegmentKiller.kill(DataSegment segment) |
DataSegment |
AzureDataSegmentPusher.push(File indexFilesDir,
DataSegment segment) |
DataSegment |
AzureDataSegmentPusher.uploadDataSegment(DataSegment segment,
int version,
long size,
File compressedSegmentData,
File descriptorFile,
Map<String,String> azurePaths) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
CassandraDataSegmentPusher.push(File indexFilesDir,
DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
void |
CassandraDataSegmentPuller.getSegmentFiles(DataSegment segment,
File outDir) |
DataSegment |
CassandraDataSegmentPusher.push(File indexFilesDir,
DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
CloudFilesDataSegmentPusher.push(File indexFilesDir,
DataSegment inSegment) |
Modifier and Type | Method and Description |
---|---|
static String |
CloudFilesUtils.buildCloudFilesPath(String basePath,
DataSegment segment) |
void |
CloudFilesDataSegmentPuller.getSegmentFiles(DataSegment segment,
File outDir) |
DataSegment |
CloudFilesDataSegmentPusher.push(File indexFilesDir,
DataSegment inSegment) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
GoogleDataSegmentPusher.push(File indexFilesDir,
DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
File |
GoogleDataSegmentPusher.createDescriptorFile(com.fasterxml.jackson.databind.ObjectMapper jsonMapper,
DataSegment segment) |
void |
GoogleDataSegmentPuller.getSegmentFiles(DataSegment segment,
File outDir) |
void |
GoogleDataSegmentKiller.kill(DataSegment segment) |
DataSegment |
GoogleDataSegmentPusher.push(File indexFilesDir,
DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
HdfsDataSegmentPusher.push(File inDir,
DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
Set<DataSegment> |
HdfsDataSegmentFinder.findSegments(String workingDirPathStr,
boolean updateDescriptor) |
Modifier and Type | Method and Description |
---|---|
void |
HdfsDataSegmentPuller.getSegmentFiles(DataSegment segment,
File dir) |
void |
HdfsDataSegmentKiller.kill(DataSegment segment) |
DataSegment |
HdfsDataSegmentPusher.push(File inDir,
DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
S3DataSegmentArchiver.archive(DataSegment segment) |
DataSegment |
S3DataSegmentMover.move(DataSegment segment,
Map<String,Object> targetLoadSpec) |
DataSegment |
S3DataSegmentPusher.push(File indexFilesDir,
DataSegment inSegment) |
DataSegment |
S3DataSegmentArchiver.restore(DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
Set<DataSegment> |
S3DataSegmentFinder.findSegments(String workingDirPath,
boolean updateDescriptor) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
S3DataSegmentArchiver.archive(DataSegment segment) |
static String |
S3Utils.constructSegmentPath(String baseKey,
DataSegment segment) |
void |
S3DataSegmentPuller.getSegmentFiles(DataSegment segment,
File outDir) |
void |
S3DataSegmentKiller.kill(DataSegment segment) |
DataSegment |
S3DataSegmentMover.move(DataSegment segment,
Map<String,Object> targetLoadSpec) |
DataSegment |
S3DataSegmentPusher.push(File indexFilesDir,
DataSegment inSegment) |
DataSegment |
S3DataSegmentArchiver.restore(DataSegment segment) |
Constructor and Description |
---|
S3Coords(DataSegment segment) |
Modifier and Type | Method and Description |
---|---|
DataSegment |
DataSegment.Builder.build() |
DataSegment |
DataSegment.withBinaryVersion(int binaryVersion) |
DataSegment |
DataSegment.withDimensions(List<String> dimensions) |
DataSegment |
DataSegment.withLoadSpec(Map<String,Object> loadSpec) |
DataSegment |
DataSegment.withMetrics(List<String> metrics) |
DataSegment |
DataSegment.withSize(long size) |
DataSegment |
DataSegment.withVersion(String version) |
Modifier and Type | Method and Description |
---|---|
static Comparator<DataSegment> |
DataSegment.bucketMonthComparator() |
static VersionedIntervalTimeline<String,DataSegment> |
VersionedIntervalTimeline.forSegments(Iterable<DataSegment> segments) |
Modifier and Type | Method and Description |
---|---|
static DataSegment.Builder |
DataSegment.builder(DataSegment segment) |
int |
DataSegment.compareTo(DataSegment dataSegment) |
Modifier and Type | Method and Description |
---|---|
static VersionedIntervalTimeline<String,DataSegment> |
VersionedIntervalTimeline.forSegments(Iterable<DataSegment> segments) |
Constructor and Description |
---|
Builder(DataSegment segment) |
Copyright © 2011–2017. All rights reserved.