slight upgrade

This commit is contained in:
Yann Esposito 2016-03-29 11:47:22 +02:00
parent af539989e2
commit 70a5929761
9 changed files with 469 additions and 20 deletions

3
.gitignore vendored
View file

@ -1 +1,4 @@
TAGS
compile
compile.hi
compile.o

Binary file not shown.

Binary file not shown.

View file

@ -72,12 +72,12 @@
// Optional libraries used to extend on reveal.js
dependencies: [
{ src: 'lib/js/classList.js', condition: function() { return !document.body.classList; } },
{ src: 'plugin/markdown/showdown.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
{ src: 'plugin/markdown/markdown.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
{ src: 'plugin/highlight/highlight.js', async: true, callback: function() { hljs.initHighlightingOnLoad(); } },
{ src: 'plugin/zoom-js/zoom.js', async: true, condition: function() { return !!document.body.classList; } },
{ src: 'plugin/notes/notes.js', async: true, condition: function() { return !!document.body.classList; } }
{ src: '/.reveal.js-3.2.0/lib/js/classList.js', condition: function() { return !document.body.classList; } },
{ src: '/.reveal.js-3.2.0/plugin/markdown/showdown.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
{ src: '/.reveal.js-3.2.0/plugin/markdown/markdown.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
{ src: '/.reveal.js-3.2.0/plugin/highlight/highlight.js', async: true, callback: function() { hljs.initHighlightingOnLoad(); } },
{ src: '/.reveal.js-3.2.0/plugin/zoom-js/zoom.js', async: true, condition: function() { return !!document.body.classList; } },
{ src: '/.reveal.js-3.2.0/plugin/notes/notes.js', async: true, condition: function() { return !!document.body.classList; } }
]
});

View file

@ -10,6 +10,7 @@ import Prelude hiding (FilePath)
import System.Console.ANSI
import Turtle
import Data.Maybe (isJust,fromJust)
import qualified System.IO as System
-- Command Line Options
data Options =
@ -64,11 +65,16 @@ findMarkdownFiles = do
_ <- guard (isJust mf)
return (fromJust mf)
-- | Write text to stdout and flush right away, so partial-line
-- progress output is visible even under block buffering.
pr :: Text -> IO ()
pr txt = T.putStr txt >> System.hFlush System.stdout
-- | basic exec command with debug option and colors DONE or FAILED status
execcmd :: Bool -> FilePath -> Text -> IO ()
execcmd dbg dest cmd = do
when dbg (T.putStrLn cmd)
T.putStr $ format fp dest <> " "
pr (format fp dest <> " ")
answer <- shell cmd empty
case answer of
ExitSuccess -> greenPrn "[DONE]"

Binary file not shown.

View file

@ -22,31 +22,70 @@
<ul>
<li><a href="#intro">Intro</a><ul>
<li><a href="#plan">Plan</a></li>
<li><a href="#expérience">Expérience</a></li>
<li><a href="#experience">Experience</a></li>
<li><a href="#real-time">Real Time?</a></li>
<li><a href="#demande">Demande</a></li>
<li><a href="#en-pratique">En pratique</a></li>
<li><a href="#origine-php">Origine (PHP)</a></li>
<li><a href="#introduction">Introduction</a></li>
<li><a href="#pre-considerations">Pre Considerations</a></li>
<li><a href="#try-to-conceptualize-events">Try to conceptualize (events)</a></li>
<li><a href="#in-the-end">In the End</a></li>
</ul></li>
<li><a href="#druid">Druid</a><ul>
<li><a href="#who">Who</a></li>
<li><a href="#goal">Goal</a></li>
<li><a href="#concepts">Concepts</a></li>
<li><a href="#features">Features</a></li>
<li><a href="#proof">Proof</a></li>
<li><a href="#storage">Storage</a></li>
<li><a href="#columnar-storage">Columnar Storage</a></li>
<li><a href="#index">Index</a></li>
<li><a href="#data-segments">Data Segments</a></li>
<li><a href="#real-time-ingestion">Real-time ingestion</a></li>
<li><a href="#batch-ingestion">Batch Ingestion</a></li>
<li><a href="#real-time-ingestion-1">Real-time Ingestion</a></li>
</ul></li>
<li><a href="#querying">Querying</a><ul>
<li><a href="#query-types">Query types</a></li>
<li><a href="#tip">Tip</a></li>
<li><a href="#query-spec">Query Spec</a></li>
<li><a href="#examples">Example(s)</a></li>
<li><a href="#caching">Caching</a></li>
<li><a href="#load-rules">Load Rules</a></li>
</ul></li>
<li><a href="#components">Components</a><ul>
<li><a href="#druid-components">Druid Components</a></li>
<li><a href="#coordinator">Coordinator</a></li>
<li><a href="#real-time-nodes">Real-time Nodes</a></li>
<li><a href="#historical-nodes">Historical Nodes</a></li>
<li><a href="#overlord">Overlord</a></li>
<li><a href="#middle-manager">Middle Manager</a></li>
<li><a href="#broker-nodes">Broker Nodes</a></li>
<li><a href="#deep-storage">Deep Storage</a></li>
</ul></li>
<li><a href="#considerations-tools">Considerations &amp; Tools</a><ul>
<li><a href="#when-not-to-choose-druid">When <em>not</em> to choose Druid</a></li>
<li><a href="#graphite-metrics">Graphite (metrics)</a></li>
<li><a href="#pivot-exploring-data">Pivot (exploring data)</a></li>
<li><a href="#caravel-exploring-data">Caravel (exploring data)</a></li>
</ul></li>
</ul>
</nav>
<h1 id="intro">Intro</h1>
<h2 id="plan">Plan</h2>
<ul>
<li>Introduction ; pourquoi ?</li>
<li>Comment ?</li>
<li>Introduction; why?</li>
<li>How?</li>
</ul>
<h2 id="expérience">Expérience</h2>
<h2 id="experience">Experience</h2>
<ul>
<li>Real Time Social Media Analytics</li>
</ul>
<h2 id="real-time">Real Time?</h2>
<ul>
<li>Ingestion Latency: seconds</li>
<li>Query Latency: seconds</li>
</ul>
<h2 id="demande">Demande</h2>
<ul>
<li>Twitter: <code>20k msg/s</code>, <code>1msg = 10ko</code> pendant 24h</li>
@ -63,7 +102,19 @@
<li>Traitement de donnée gros volume + faible latence</li>
<li>Typiquement <code>pulse</code></li>
</ul>
<p><a href="http://pulse.vigiglo.be/#/vgteam/TV_Shows" target="_blank"> DEMO </a></p>
<p><a href="http://pulse.vigiglo.be/#/vigiglobe/Earthquake/dashboard" target="_blank"> DEMO </a></p>
<h2 id="pre-considerations">Pre Considerations</h2>
<p>Discovered vs Invented</p>
<h2 id="try-to-conceptualize-events">Try to conceptualize (events)</h2>
<p>Scalable + Real Time + Fail safe</p>
<ul>
<li>timeseries</li>
<li>alerting system</li>
<li>top N</li>
<li>etc…</li>
</ul>
<h2 id="in-the-end">In the End</h2>
<p>Druid concepts are always emerging naturally</p>
<h1 id="druid">Druid</h1>
<h2 id="who">Who</h2>
<p>Metamarkets</p>
@ -87,7 +138,162 @@
<li>low latency data ingestion</li>
</ul>
<p><strong>arbitrary exploration of billion-row tables with sub-second latencies</strong></p>
<h2 id="proof">Proof</h2>
<h2 id="storage">Storage</h2>
<ul>
<li>Columnar</li>
<li>Inverted Index</li>
<li>Immutable Segments</li>
</ul>
<h2 id="columnar-storage">Columnar Storage</h2>
<h2 id="index">Index</h2>
<ul>
<li>Values are dictionary encoded</li>
</ul>
<p><code>{&quot;USA&quot; 1, &quot;Canada&quot; 2, &quot;Mexico&quot; 3, ...}</code></p>
<ul>
<li>Bitmap for every dimension value (used by filters)</li>
</ul>
<p><code>&quot;USA&quot; -&gt; [0 1 0 0 1 1 0 0 0]</code></p>
<ul>
<li>Column values (used by aggregation queries)</li>
</ul>
<p><code>[2,1,3,15,1,1,2,8,7]</code></p>
<h2 id="data-segments">Data Segments</h2>
<ul>
<li>Per time interval</li>
<li>skip segments when querying</li>
<li>Immutable</li>
<li>Cache friendly</li>
<li>No locking</li>
<li>Versioned</li>
<li>No locking</li>
<li>Read-write concurrency</li>
</ul>
<h2 id="real-time-ingestion">Real-time ingestion</h2>
<ul>
<li>Via Real-Time Node and Firehose</li>
<li>No redundancy or HA, thus not recommended</li>
<li>Via Indexing Service and Tranquility API</li>
<li>Core API</li>
<li>Integration with Streaming Frameworks</li>
<li>HTTP Server</li>
<li><strong>Kafka Consumer</strong></li>
</ul>
<h2 id="batch-ingestion">Batch Ingestion</h2>
<ul>
<li>File based (HDFS, S3, …)</li>
</ul>
<h2 id="real-time-ingestion-1">Real-time Ingestion</h2>
<pre><code>Task 1: [ Interval ][ Window ]
Task 2: [ ]
---------------------------------------&gt;
time</code></pre>
<p>Minimum indexing slots =<br />
Data Sources × Partitions × Replicas × 2</p>
<h1 id="querying">Querying</h1>
<h2 id="query-types">Query types</h2>
<ul>
<li>Group by: group by multiple dimensions</li>
<li>Top N: like grouping by a single dimension</li>
<li>Timeseries: without grouping over dimensions</li>
<li>Search: Dimensions lookup</li>
<li>Time Boundary: Find available data timeframe</li>
<li>Metadata queries</li>
</ul>
<h2 id="tip">Tip</h2>
<ul>
<li>Prefer <code>topN</code> over <code>groupBy</code></li>
<li>Prefer <code>timeseries</code> over <code>topN</code></li>
<li>Use limits (and priorities)</li>
</ul>
<h2 id="query-spec">Query Spec</h2>
<ul>
<li>Data source</li>
<li>Dimensions</li>
<li>Interval</li>
<li>Filters</li>
<li>Aggregations</li>
<li>Post Aggregations</li>
<li>Granularity</li>
<li>Context (query configuration)</li>
<li>Limit</li>
</ul>
<h2 id="examples">Example(s)</h2>
<p>TODO</p>
<h2 id="caching">Caching</h2>
<ul>
<li>Historical node level</li>
<li>By segment</li>
<li>Broker Level</li>
<li>By segment and query</li>
<li><code>groupBy</code> is disabled on purpose!</li>
<li>By default - local caching</li>
</ul>
<h2 id="load-rules">Load Rules</h2>
<ul>
<li>Can be defined</li>
<li>What can be set</li>
</ul>
<h1 id="components">Components</h1>
<h2 id="druid-components">Druid Components</h2>
<ul>
<li>Real-time Nodes</li>
<li>Historical Nodes</li>
<li>Broker Nodes</li>
<li>Coordinator</li>
<li>For indexing:</li>
<li>Overlord</li>
<li><p>Middle Manager</p></li>
<li>Deep Storage</li>
<li><p>Metadata Storage</p></li>
<li>Load Balancer</li>
<li><p>Cache</p></li>
</ul>
<h2 id="coordinator">Coordinator</h2>
<p>Manage Segments</p>
<h2 id="real-time-nodes">Real-time Nodes</h2>
<ul>
<li>Pulling data in real-time</li>
<li>Indexing it</li>
</ul>
<h2 id="historical-nodes">Historical Nodes</h2>
<ul>
<li>Keep historical segments</li>
</ul>
<h2 id="overlord">Overlord</h2>
<ul>
<li>Accepts tasks and distributes them to middle manager</li>
</ul>
<h2 id="middle-manager">Middle Manager</h2>
<ul>
<li>Execute submitted tasks via Peons</li>
</ul>
<h2 id="broker-nodes">Broker Nodes</h2>
<ul>
<li>Route query to Real-time and Historical nodes</li>
<li>Merge results</li>
</ul>
<h2 id="deep-storage">Deep Storage</h2>
<ul>
<li>Segments backup (HDFS, S3, …)</li>
</ul>
<h1 id="considerations-tools">Considerations &amp; Tools</h1>
<h2 id="when-not-to-choose-druid">When <em>not</em> to choose Druid</h2>
<ul>
<li>Data is not time-series</li>
<li>Cardinality is <em>very</em> high</li>
<li>Number of dimensions is high</li>
<li>Setup cost must be avoided</li>
</ul>
<h2 id="graphite-metrics">Graphite (metrics)</h2>
<p><img src="img/graphite.png" alt="Graphite" />__</p>
<p><a href="http://graphite.wikidot.com">Graphite</a></p>
<h2 id="pivot-exploring-data">Pivot (exploring data)</h2>
<p><img src="img/pivot.gif" alt="Pivot" /> </p>
<p><a href="https://github.com/implydata/pivot">Pivot</a></p>
<h2 id="caravel-exploring-data">Caravel (exploring data)</h2>
<p><img src="img/caravel.png" alt="caravel" /> </p>
<p><a href="https://github.com/airbnb/caravel">Caravel</a></p>
<div id="footer">
<a href="yannesposito.com">Y</a>
</div>

Binary file not shown.

View file

@ -43,16 +43,23 @@
<section id="plan" class="level2">
<h2>Plan</h2>
<ul>
<li>Introduction ; pourquoi ?</li>
<li>Comment ?</li>
<li>Introduction; why?</li>
<li>How?</li>
</ul>
</section>
<section id="expérience" class="level2">
<h2>Expérience</h2>
<section id="experience" class="level2">
<h2>Experience</h2>
<ul>
<li>Real Time Social Media Analytics</li>
</ul>
</section>
<section id="real-time" class="level2">
<h2>Real Time?</h2>
<ul>
<li>Ingestion Latency: seconds</li>
<li>Query Latency: seconds</li>
</ul>
</section>
<section id="demande" class="level2">
<h2>Demande</h2>
<ul>
@ -78,6 +85,24 @@
</ul>
<p><a href="http://pulse.vigiglo.be/#/vigiglobe/Earthquake/dashboard" target="_blank"> DEMO </a></p>
</section>
<section id="pre-considerations" class="level2">
<h2>Pre Considerations</h2>
<p>Discovered vs Invented</p>
</section>
<section id="try-to-conceptualize-events" class="level2">
<h2>Try to conceptualize (events)</h2>
<p>Scalable + Real Time + Fail safe</p>
<ul>
<li>timeseries</li>
<li>alerting system</li>
<li>top N</li>
<li>etc...</li>
</ul>
</section>
<section id="in-the-end" class="level2">
<h2>In the End</h2>
<p>Druid concepts are always emerging naturally</p>
</section>
</section>
<section id="druid" class="level1">
<h1>Druid</h1>
@ -111,8 +136,217 @@
</ul>
<p><strong>arbitrary exploration of billion-row tables with sub-second latencies</strong></p>
</section>
<section id="proof" class="level2">
<h2>Proof</h2>
<section id="storage" class="level2">
<h2>Storage</h2>
<ul>
<li>Columnar</li>
<li>Inverted Index</li>
<li>Immutable Segments</li>
</ul>
</section>
<section id="columnar-storage" class="level2">
<h2>Columnar Storage</h2>
</section>
<section id="index" class="level2">
<h2>Index</h2>
<ul>
<li>Values are dictionary encoded</li>
</ul>
<p><code>{&quot;USA&quot; 1, &quot;Canada&quot; 2, &quot;Mexico&quot; 3, ...}</code></p>
<ul>
<li>Bitmap for every dimension value (used by filters)</li>
</ul>
<p><code>&quot;USA&quot; -&gt; [0 1 0 0 1 1 0 0 0]</code></p>
<ul>
<li>Column values (used by aggregation queries)</li>
</ul>
<p><code>[2,1,3,15,1,1,2,8,7]</code></p>
</section>
<section id="data-segments" class="level2">
<h2>Data Segments</h2>
<ul>
<li>Per time interval</li>
<li>skip segments when querying</li>
<li>Immutable</li>
<li>Cache friendly</li>
<li>No locking</li>
<li>Versioned</li>
<li>No locking</li>
<li>Read-write concurrency</li>
</ul>
</section>
<section id="real-time-ingestion" class="level2">
<h2>Real-time ingestion</h2>
<ul>
<li>Via Real-Time Node and Firehose</li>
<li>No redundancy or HA, thus not recommended</li>
<li>Via Indexing Service and Tranquility API</li>
<li>Core API</li>
<li>Integration with Streaming Frameworks</li>
<li>HTTP Server</li>
<li><strong>Kafka Consumer</strong></li>
</ul>
</section>
<section id="batch-ingestion" class="level2">
<h2>Batch Ingestion</h2>
<ul>
<li>File based (HDFS, S3, ...)</li>
</ul>
</section>
<section id="real-time-ingestion-1" class="level2">
<h2>Real-time Ingestion</h2>
<pre><code>Task 1: [ Interval ][ Window ]
Task 2: [ ]
---------------------------------------&gt;
time</code></pre>
<p>Minimum indexing slots =<br />
Data Sources × Partitions × Replicas × 2</p>
</section>
</section>
<section id="querying" class="level1">
<h1>Querying</h1>
<section id="query-types" class="level2">
<h2>Query types</h2>
<ul>
<li>Group by: group by multiple dimensions</li>
<li>Top N: like grouping by a single dimension</li>
<li>Timeseries: without grouping over dimensions</li>
<li>Search: Dimensions lookup</li>
<li>Time Boundary: Find available data timeframe</li>
<li>Metadata queries</li>
</ul>
</section>
<section id="tip" class="level2">
<h2>Tip</h2>
<ul>
<li>Prefer <code>topN</code> over <code>groupBy</code></li>
<li>Prefer <code>timeseries</code> over <code>topN</code></li>
<li>Use limits (and priorities)</li>
</ul>
</section>
<section id="query-spec" class="level2">
<h2>Query Spec</h2>
<ul>
<li>Data source</li>
<li>Dimensions</li>
<li>Interval</li>
<li>Filters</li>
<li>Aggregations</li>
<li>Post Aggregations</li>
<li>Granularity</li>
<li>Context (query configuration)</li>
<li>Limit</li>
</ul>
</section>
<section id="examples" class="level2">
<h2>Example(s)</h2>
<p>TODO</p>
</section>
<section id="caching" class="level2">
<h2>Caching</h2>
<ul>
<li>Historical node level</li>
<li>By segment</li>
<li>Broker Level</li>
<li>By segment and query</li>
<li><code>groupBy</code> is disabled on purpose!</li>
<li>By default - local caching</li>
</ul>
</section>
<section id="load-rules" class="level2">
<h2>Load Rules</h2>
<ul>
<li>Can be defined</li>
<li>What can be set</li>
</ul>
</section>
</section>
<section id="components" class="level1">
<h1>Components</h1>
<section id="druid-components" class="level2">
<h2>Druid Components</h2>
<ul>
<li>Real-time Nodes</li>
<li>Historical Nodes</li>
<li>Broker Nodes</li>
<li>Coordinator</li>
<li>For indexing:</li>
<li>Overlord</li>
<li><p>Middle Manager</p></li>
<li>Deep Storage</li>
<li><p>Metadata Storage</p></li>
<li>Load Balancer</li>
<li><p>Cache</p></li>
</ul>
</section>
<section id="coordinator" class="level2">
<h2>Coordinator</h2>
<p>Manage Segments</p>
</section>
<section id="real-time-nodes" class="level2">
<h2>Real-time Nodes</h2>
<ul>
<li>Pulling data in real-time</li>
<li>Indexing it</li>
</ul>
</section>
<section id="historical-nodes" class="level2">
<h2>Historical Nodes</h2>
<ul>
<li>Keep historical segments</li>
</ul>
</section>
<section id="overlord" class="level2">
<h2>Overlord</h2>
<ul>
<li>Accepts tasks and distributes them to middle manager</li>
</ul>
</section>
<section id="middle-manager" class="level2">
<h2>Middle Manager</h2>
<ul>
<li>Execute submitted tasks via Peons</li>
</ul>
</section>
<section id="broker-nodes" class="level2">
<h2>Broker Nodes</h2>
<ul>
<li>Route query to Real-time and Historical nodes</li>
<li>Merge results</li>
</ul>
</section>
<section id="deep-storage" class="level2">
<h2>Deep Storage</h2>
<ul>
<li>Segments backup (HDFS, S3, ...)</li>
</ul>
</section>
</section>
<section id="considerations-tools" class="level1">
<h1>Considerations &amp; Tools</h1>
<section id="when-not-to-choose-druid" class="level2">
<h2>When <em>not</em> to choose Druid</h2>
<ul>
<li>Data is not time-series</li>
<li>Cardinality is <em>very</em> high</li>
<li>Number of dimensions is high</li>
<li>Setup cost must be avoided</li>
</ul>
</section>
<section id="graphite-metrics" class="level2">
<h2>Graphite (metrics)</h2>
<p><img src="img/graphite.png" alt="Graphite" />__</p>
<p><a href="http://graphite.wikidot.com">Graphite</a></p>
</section>
<section id="pivot-exploring-data" class="level2">
<h2>Pivot (exploring data)</h2>
<p><img src="img/pivot.gif" alt="Pivot" /> </p>
<p><a href="https://github.com/implydata/pivot">Pivot</a></p>
</section>
<section id="caravel-exploring-data" class="level2">
<h2>Caravel (exploring data)</h2>
<p><img src="img/caravel.png" alt="caravel" /> </p>
<p><a href="https://github.com/airbnb/caravel">Caravel</a></p>
</section>
</section>
</div>