replaced eithers with conditionals to comply with latest versions of schema

This commit is contained in:
Guillaume Buisson 2016-02-05 17:38:27 +01:00
parent 15173fbb51
commit e0f0880b36
10 changed files with 110 additions and 98 deletions

View file

@@ -6,7 +6,7 @@
:dependencies [[org.clojure/clojure "1.7.0"]
[swiss-arrows "1.0.0"]
[clj-http "2.0.0"]
[prismatic/schema "0.4.4"]
[prismatic/schema "1.0.4"]
[org.clojure/tools.logging "0.3.1"]
[org.clojure/data.json "0.2.6"]
[zookeeper-clj "0.9.1"]])

View file

@@ -54,28 +54,22 @@
:name s/Str
:fieldName s/Str})
(s/defschema aggregation
"Aggregations are specifications of processing over metrics available in Druid"
(s/conditional
#(= :count (:type %)) countAggregator
#(= :longSum (:type %)) longSumAggregator
#(= :doubleSum (:type %)) doubleSumAggregator
#(= :min (:type %)) minAggregator
#(= :max (:type %)) maxAggregator
#(= :javascript (:type %)) javascriptAggregator
#(= :cardinality (:type %)) cardinalityAggregator
#(= :hyperUnique (:type %)) hyperUniqueAggregator))
(s/defschema filteredAggregator
"A filtered aggregator wraps any given aggregator, but only aggregates the values for which the given dimension filter matches.
This makes it possible to compute the results of a filtered and an unfiltered aggregation simultaneously, without having to issue multiple queries, and use both results as part of post-aggregations."
{:type (s/enum :filtered)
:filter Filter
:aggregator (s/either countAggregator
longSumAggregator
doubleSumAggregator
minAggregator
maxAggregator
javascriptAggregator
cardinalityAggregator
hyperUniqueAggregator)})
:aggregator aggregation})
(s/defschema aggregation
"Aggregations are specifications of processing over metrics available in Druid"
(s/either countAggregator
longSumAggregator
doubleSumAggregator
minAggregator
maxAggregator
javascriptAggregator
cardinalityAggregator
hyperUniqueAggregator
filteredAggregator))

View file

@@ -30,12 +30,13 @@
:function s/Str})
(s/defschema extractionFn
(s/either regularExpressionExtractionFunction
partialExtractionFunction
searchQueryExtractionFunction
timeFormatExtractionFunction
timeParsingExtractionFunction
javascriptExtractionFunction))
(s/conditional
#(= :regex (:type %)) regularExpressionExtractionFunction
#(= :partial (:type %)) partialExtractionFunction
#(= :searchQuery (:type %)) searchQueryExtractionFunction
#(= :timeFormat (:type %)) timeFormatExtractionFunction
#(= :time (:type %)) timeParsingExtractionFunction
#(= :javascript (:type %)) javascriptExtractionFunction))
(s/defschema extraction
{:type (s/enum :extraction)

View file

@@ -28,23 +28,23 @@ Selector filters can be used as the base filters for more complex Boolean expres
(s/defschema spatialFilter
{:type (s/enum :spatial)
:dimension s/Str
:bound (s/either
{:type (s/enum :rectangular)
:bound (s/conditional
#(= :rectangular (:type %)) {:type (s/enum :rectangular)
:minCoords [s/Num]
:maxCoords [s/Num]}
{:type (s/enum :radius)
#(= :radius (:type %)) {:type (s/enum :radius)
:coords [s/Num]
:radius s/Num})})
(s/defschema Filter
"A filter is a JSON object indicating which rows of data should be included in the computation for a query.
Its essentially the equivalent of the WHERE clause in SQL. Druid supports the following types of filters."
(s/either selectorFilter
regexFilter
javascriptFilter
spatialFilter
{:type (s/enum :not)
:field (s/recursive #'Filter)}
{:type (s/enum :or :and)
(s/conditional
#(= :selector (:type %)) selectorFilter
#(= :regex (:type %)) regexFilter
#(= :javascript (:type %)) javascriptFilter
#(= :spatial (:type %)) spatialFilter
#(or (= :or (:type %))
(= :and (:type %))) {:type (s/enum :or :and)
:fields [(s/recursive #'Filter)]}))

View file

@@ -30,9 +30,10 @@
(s/defschema dimensionSpec
"define how dimension values get transformed prior to aggregation"
(s/either s/Str
defaultDimension
extraction))
(s/conditional
#(= :default (:type %)) defaultDimension
#(= :extraction (:type %)) extraction
:else s/Str))
(s/defschema orderByColumnSpec
"Druid orderByColumnSpec option schema"
@@ -67,9 +68,15 @@
(s/defschema topNMetricSpec
"topN metric option schema"
(s/either {:type (s/enum :numeric :lexicographic :alphaNumeric :inverted)
(s/conditional
#(map? %) {:type (s/enum
:numeric
:lexicographic
:alphaNumeric
:inverted)
(s/optional-key :metric) s/Str
(s/optional-key :previousStop) s/Str} s/Str))
(s/optional-key :previousStop) s/Str}
:else s/Str))
(s/defschema segmentMetadataToInclude
"Druid SegmentMetadata toInclude option schema"

View file

@@ -28,9 +28,10 @@ minutes and seconds (e.g. P2W, P3M, PT1H30M, PT0.750S) in ISO8601 format."
(s/defschema granularity
"The granularity field determines how data gets bucketed across the time dimension, or how it gets aggregated by hour, day, minute, etc."
(s/either simpleGranularity
durationGranularity
periodGranularity))
(s/conditional
#(= :duration (:type %)) durationGranularity
#(= :period (:type %)) periodGranularity
:else simpleGranularity))

View file

@@ -19,8 +19,9 @@
:value Long})
(s/defschema having
(s/either havingEqualTo
havingGreaterThan
havingLessThan
{:type (s/enum :or :not :and)
(s/conditional
#(= :equalTo (:type %)) havingEqualTo
#(= :greaterThan (:type %)) havingGreaterThan
#(= :lessThan (:type %)) havingLessThan
:else {:type (s/enum :or :not :and)
:havingSpecs (s/recursive #'having)}))

View file

@@ -30,12 +30,12 @@
{:type (s/enum :arithmetic)
:name s/Str
:fn (s/enum "+" "-" "*" "/" "quotient")
:fields [(s/either
(s/recursive #'arithmeticPostAggregator)
fieldAccessPostAggregator
constantPostAggregator
javascriptPostAggregator
hyperUniqueCardinalityPostAggregator)]
:fields [(s/conditional
#(= :arithmetic (:type %)) (s/recursive #'arithmeticPostAggregator)
#(= :fieldAccess (:type %)) fieldAccessPostAggregator
#(= :constant (:type %)) constantPostAggregator
#(= :javascript (:type %)) javascriptPostAggregator
#(= :hyperUniqueCardinality (:type %)) hyperUniqueCardinalityPostAggregator)]
(s/optional-key :ordering) (s/enum nil "numericFirst")})
@@ -93,16 +93,16 @@ Offset determines the value on which those interval bins align."
:probabilities [s/Any]})
(s/defschema postAggregation
(s/either arithmeticPostAggregator
fieldAccessPostAggregator
constantPostAggregator
javascriptPostAggregator
hyperUniqueCardinalityPostAggregator
equalBucketsPostAggregator
bucketsPostAggregator
customBucketsPostAggregator
minPostAggregator
maxPostAggregator
quantilePostAggregator
quantilesPostAggregator))
(s/conditional
#(= :arithmetic (:type %)) arithmeticPostAggregator
#(= :fieldAccess (:type %)) fieldAccessPostAggregator
#(= :constant (:type %)) constantPostAggregator
#(= :javascript (:type %)) javascriptPostAggregator
#(= :hyperUniqueCardinality (:type %)) hyperUniqueCardinalityPostAggregator
#(= :equalBuckets (:type %)) equalBucketsPostAggregator
#(= :buckets (:type %)) bucketsPostAggregator
#(= :customBuckets (:type %)) customBucketsPostAggregator
#(= :min (:type %)) minPostAggregator
#(= :max (:type %)) maxPostAggregator
#(= :quantile (:type %)) quantilePostAggregator
#(= :quantiles (:type %)) quantilesPostAggregator))

View file

@@ -84,7 +84,9 @@ TopNs are much faster and resource efficient than GroupBys for this use case."
(s/optional-key :pagingSpec) pagingSpec
(s/optional-key :limitSpec) limitSpec
(s/optional-key :filter) Filter
(s/optional-key :context) context})
;;(s/optional-key :context) context
}
)
(s/defschema segmentMetadata
"Segment metadata queries return per segment information"
@@ -105,13 +107,14 @@ TopNs are much faster and resource efficient than GroupBys for this use case."
(s/defschema query
"druid query router"
(s/either groupBy
search
segmentMetadata
timeBoundary
timeseries
topN
select))
(s/conditional
#(= :groupBy (:queryType %)) groupBy
#(= :search (:queryType %)) search
#(= :segmentMetadata (:queryType %)) segmentMetadata
#(= :timeBoundary (:queryType %)) timeBoundary
#(= :timeSeries (:queryType %)) timeseries
#(= :topN (:queryType %)) topN
#(= :select (:queryType %)) select))
(def queries {:groupBy groupBy
:search search

View file

@@ -97,6 +97,11 @@
:metrics []
:granularity :all
:intervals ["2013-01-01/2013-01-02"]
:filter {:type :spatial
:dimension "test"
:bound {:type :rectangular
:minCoords [1 2]
:maxCoords [3 4]}}
:pagingSpec {:pagingIdentifiers {} :threshold 5}})
@@ -106,7 +111,7 @@
:threshold 5
:metric "count"}))
(deftest test-valid-groupby-query
(deftest test-valid-groupby-query
(is (= (validate-groupby valid-groupby-query)
valid-groupby-query)))