-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathnnstreamer_capi.html
executable file
·481 lines (422 loc) · 19.8 KB
/
nnstreamer_capi.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
<!DOCTYPE html>
<html lang="en">
<head>
<base href=".">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>NNStreamer C-API</title>
<link rel="stylesheet" href="assets/css/dark-frontend.css" type="text/css" title="dark">
<link rel="alternate stylesheet" href="assets/css/light-frontend.css" type="text/css" title="light">
<link rel="stylesheet" href="assets/css/bootstrap-toc.min.css" type="text/css">
<link rel="stylesheet" href="assets/css/jquery.mCustomScrollbar.min.css">
<link rel="stylesheet" href="assets/js/search/enable_search.css" type="text/css">
<link rel="stylesheet" href="assets/css/extra_frontend.css" type="text/css">
<link rel="stylesheet" href="assets/css/prism-tomorrow.css" type="text/css" title="dark">
<link rel="alternate stylesheet" href="assets/css/prism.css" type="text/css" title="light">
<script src="assets/js/mustache.min.js"></script>
<script src="assets/js/jquery.js"></script>
<script src="assets/js/bootstrap.js"></script>
<script src="assets/js/scrollspy.js"></script>
<script src="assets/js/typeahead.jquery.min.js"></script>
<script src="assets/js/search.js"></script>
<script src="assets/js/compare-versions.js"></script>
<script src="assets/js/jquery.mCustomScrollbar.concat.min.js"></script>
<script src="assets/js/bootstrap-toc.min.js"></script>
<script src="assets/js/jquery.touchSwipe.min.js"></script>
<script src="assets/js/anchor.min.js"></script>
<script src="assets/js/tag_filtering.js"></script>
<script src="assets/js/language_switching.js"></script>
<script src="assets/js/styleswitcher.js"></script>
<script src="assets/js/lines_around_headings.js"></script>
<script src="assets/js/prism-core.js"></script>
<script src="assets/js/prism-autoloader.js"></script>
<script src="assets/js/prism_autoloader_path_override.js"></script>
<script src="assets/js/trie.js"></script>
<link rel="icon" type="image/png" href="assets/images/nnstreamer_logo.png">
</head>
<body class="no-script
">
<script>
$('body').removeClass('no-script');
</script>
<nav class="navbar navbar-fixed-top navbar-default" id="topnav">
<div class="container-fluid">
<div class="navbar-right">
<a id="toc-toggle">
<span class="glyphicon glyphicon-menu-right"></span>
<span class="glyphicon glyphicon-menu-left"></span>
</a>
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar-wrapper" aria-expanded="false">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<span title="light mode switch" class="glyphicon glyphicon-sunglasses pull-right" id="lightmode-icon"></span>
<form class="navbar-form pull-right" id="navbar-search-form">
<div class="form-group has-feedback">
<input type="text" class="form-control input-sm" name="search" id="sidenav-lookup-field" placeholder="search" disabled>
<span class="glyphicon glyphicon-search form-control-feedback" id="search-mgn-glass"></span>
</div>
</form>
</div>
<div class="navbar-header">
<a id="sidenav-toggle">
<span class="glyphicon glyphicon-menu-right"></span>
<span class="glyphicon glyphicon-menu-left"></span>
</a>
<a id="home-link" href="index.html" class="hotdoc-navbar-brand">
<img src="assets/images/nnstreamer_logo.png" alt="Home">
</a>
</div>
<div class="navbar-collapse collapse" id="navbar-wrapper">
<ul class="nav navbar-nav" id="menu">
<li class="dropdown">
<a class="dropdown-toggle" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
API References<span class="caret"></span>
</a>
<ul class="dropdown-menu" id="modules-menu">
<li>
<a href="doc-index.html">NNStreamer doc</a>
</li>
<li>
<a href="gst/nnstreamer/README.html">NNStreamer Elements</a>
</li>
<li>
<a href="nnstreamer-example/index.html">NNStreamer Examples</a>
</li>
<li>
<a href="API-reference.html">API reference</a>
</li>
</ul>
</li>
<li>
<a href="doc-index.html">Documents</a>
</li>
<li>
<a href="gst/nnstreamer/README.html">Elements</a>
</li>
<li>
<a href="tutorials.html">Tutorials</a>
</li>
<li>
<a href="API-reference.html">API reference</a>
</li>
</ul>
<div class="hidden-xs hidden-sm navbar-text navbar-center">
</div>
</div>
</div>
</nav>
<main>
<div data-extension="core" data-hotdoc-in-toplevel="True" data-hotdoc-project="NNStreamer" data-hotdoc-ref="nnstreamer_capi.html" class="page_container" id="page-wrapper">
<script src="assets/js/utils.js"></script>
<div class="panel panel-collapse oc-collapsed" id="sidenav" data-hotdoc-role="navigation">
<script src="assets/js/full-width.js"></script>
<div id="sitenav-wrapper">
<iframe src="hotdoc-sitemap.html" id="sitenav-frame"></iframe>
</div>
</div>
<div id="body">
<div id="main">
<div id="page-description" data-hotdoc-role="main">
<p>** NOTICE: ML-API implementation is migrated to <a href="https://github.com/nnstreamer/api">api.git</a>.**</p>
<h1 id="machine-learning-inference">Machine Learning Inference</h1>
<p>You can easily create and efficiently execute data stream pipelines that consist of neural networks as filters in pipelines.</p>
<p>The main features of the Machine Learning Inference API include:</p>
<ul>
<li>
<p>Construction of data pipeline based on <a href="https://gstreamer.freedesktop.org/">GStreamer</a></p>
<p>You can compose the data stream pipeline through Machine Learning Inference with various elements of GStreamer and NNStreamer.</p>
</li>
<li>
<p><a href="nnstreamer_capi.html#single-api">Single</a> API and <a href="nnstreamer_capi.html#pipeline-api">Pipeline</a> API</p>
<p>There are two types of Machine Learning Inference API - Single API and Pipeline API.</p>
<p>Single API is useful for a simple usage scenario of neural network models. It allows invoking a neural network model with a single instance of input data for the model directly. It is useful if you have the input data pre-processed with the application itself and there are no complex interactions between neural network models, data processors, or data stream paths.</p>
<p>Pipeline API allows developers to construct and execute pipelines with multiple neural network models, multiple inputs and output nodes, multiple data processors, pre- and post-processors, and various data path manipulators. In addition, if the input is online data or streamed data, Pipeline API simplifies your application and improves its performance.</p>
</li>
<li>
<p>Support various neural network frameworks (NNFW)</p>
<p>TensorFlow, TensorFlow-Lite, Caffe2, and PyTorch are the supported neural network frameworks. Neural network model files trained by such frameworks can be imported as filters of pipelines directly.
Custom filters, which are neural network models implemented directly with programming languages including C/C++ and Python, may be imported as filters of pipelines directly as well.</p>
<blockquote>
<p><strong>Note</strong></p>
<p>The devices powered by Tizen OS can contain TensorFlow-Lite only. Ensure that the neural network frameworks that you want to use are installed.</p>
</blockquote>
</li>
</ul>
<h2 id="prerequisites">Prerequisites</h2>
<p>To enable your application to use the machine learning functionality:</p>
<ol>
<li>
<p>To use the functions and data types of the Machine Learning Inference API, include the <code><nnstreamer.h></code> header file in your application:</p>
<pre><code class="language-c">#include <nnstreamer.h>
</code></pre>
</li>
<li>
<p>To use the Machine Learning Inference API, include the following features in your <code>tizen-manifest.xml</code> file:</p>
<pre><code class="language-xml"><feature name="http://tizen.org/feature/machine_learning">true</feature>
<feature name="http://tizen.org/feature/machine_learning.inference">true</feature>
</code></pre>
</li>
</ol>
<h2 id="single-api">Single API</h2>
<p>This section shows how to load a model without the construction of pipelines.</p>
<ol>
<li>
<p>Open a model file:</p>
<pre><code class="language-c">#include <nnstreamer-single.h>
ml_single_h single;
ml_tensors_info_h in_info, out_info;
...
ml_single_open (&single, "model_file_path", in_info, out_info, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
</code></pre>
<p>To load a model file, two <code>ml_tensors_info_h</code> are required. <code>in_info</code> contains the information of the input tensors, and <code>out_info</code> contains the information of the output tensors. For more information, see <a href="nnstreamer_capi.html#tensors-information">Tensors Information</a>.</p>
</li>
<li>
<p>Get the <a href="nnstreamer_capi.html#tensors-information">Tensors Information</a>.</p>
<p>After opening the model, use the following functions to bring the information of the input and output tensors:</p>
<pre><code class="language-c">ml_single_get_input_info (single, &in_info);
ml_single_get_output_info (single, &out_info);
</code></pre>
</li>
<li>
<p>Invoke the model with input and output <a href="nnstreamer_capi.html#tensors-data">Tensors Data</a>.</p>
<p>The model can be invoked with input and output tensors data. The result is included in the output tensors data:</p>
<pre><code class="language-c">ml_tensors_data_create (in_info, &input);
ml_single_invoke (single, input, &output);
</code></pre>
</li>
<li>
<p>Close the opened handle:</p>
<pre><code class="language-c">ml_single_close (single);
</code></pre>
</li>
</ol>
<h2 id="pipeline-api">Pipeline API</h2>
<p>This section shows how to create a pipeline.</p>
<h3 id="basic-usage">Basic Usage</h3>
<ol>
<li>
<p>Construct a pipeline with the GStreamer elements.</p>
<p>Different pipelines can be constructed using various GStreamer elements:</p>
<pre><code class="language-c">char pipeline[] = "videotestsrc num_buffers=2 ! videoconvert ! videoscale ! video/x-raw,format=RGBx,width=224,height=224 ! tensor_converter ! fakesink";
ml_pipeline_h handle;
int status = ml_pipeline_construct (pipeline, NULL, NULL, &handle);
</code></pre>
</li>
<li>
<p>Start the pipeline and get state:</p>
<pre><code class="language-c">/* The pipeline could be started when the state is paused */
status = ml_pipeline_start (handle);
status = ml_pipeline_get_state (handle, &state);
</code></pre>
</li>
<li>
<p>Stop the pipeline and get state:</p>
<pre><code class="language-c">status = ml_pipeline_stop (handle);
status = ml_pipeline_get_state (handle, &state);
</code></pre>
</li>
<li>
<p>Destroy the pipeline.</p>
<p>When no longer needed, destroy the pipeline:</p>
<pre><code class="language-c">status = ml_pipeline_destroy (handle);
</code></pre>
</li>
</ol>
<h3 id="element-api">Element API</h3>
<p>You need to manipulate the input and the output data to run neural network models with Machine Learning Inference API. In addition, you can construct pipelines that can be controlled.</p>
<p>The following are the available elements:</p>
<ul>
<li>
<p><strong>Source</strong></p>
<p>The configuration of the data source element is required to set the input tensor data:</p>
<pre><code class="language-c">char pipeline[] = "appsrc name=srcx ! other/tensor,dimension=(string)4:1:1:1,type=(string)uint8,framerate=(fraction)0/1 ! tensor_sink";
</code></pre>
<p><code>ml_pipeline_src_get_handle()</code> controls the <code>appsrc</code> element with the name <code>srcx</code>:</p>
<pre><code class="language-c">ml_pipeline_h handle;
ml_pipeline_src_h srchandle;
status = ml_pipeline_construct (pipeline, NULL, NULL, &handle);
status = ml_pipeline_start (handle);
status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle);
</code></pre>
<p>You can check the information of input tensors using <code>srchandle</code>:</p>
<pre><code class="language-c">ml_tensors_info_h info;
status = ml_pipeline_src_get_tensors_info (srchandle, &info);
</code></pre>
<p>The input tensor data can be filled according to the <code>info</code>:</p>
<pre><code class="language-c">ml_tensors_data_h data;
status = ml_tensors_data_create (info, &data);
for (i = 0; i < 10; i++) {
uintarray1[i] = (uint8_t *) malloc (4);
uintarray1[i][0] = i + 4;
uintarray1[i][1] = i + 1;
uintarray1[i][2] = i + 3;
uintarray1[i][3] = i + 2;
}
status = ml_tensors_data_set_tensor_data (data, 0, uintarray1[0], 4);
/* Setting the policy of raw data pointer */
status = ml_pipeline_src_input_data (srchandle, data, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
</code></pre>
<p>After using the data source element, release the handle:</p>
<pre><code class="language-c">status = ml_pipeline_src_release_handle (srchandle);
</code></pre>
</li>
<li>
<p><strong>Sink</strong></p>
<p>The configuration of the data sink element is required to get the output tensor data:</p>
<pre><code class="language-c">char pipeline[] = "videotestsrc num-buffers=3 ! videoconvert ! tensor_converter ! appsink name=sinkx sync=false";
</code></pre>
<p><code>appsink</code> element with the name <code>sinkx</code> becomes reachable through <code>ml_pipeline_sink_register()</code>:</p>
<pre><code class="language-c">int status;
ml_pipeline_h handle;
ml_pipeline_sink_h sinkhandle;
status = ml_pipeline_sink_register (handle, "sinkx", sink_callback, user_data, &sinkhandle);
</code></pre>
<p>You can get the data from <code>sink_callback()</code>, whenever <code>appsink</code> named <code>sinkx</code> receives data:</p>
<pre><code class="language-c">typedef void (*ml_pipeline_sink_cb) (const ml_tensors_data_h data, const ml_tensors_info_h info, void *user_data);
</code></pre>
<p>Release the <code>sinkhandle</code> through <code>ml_pipeline_sink_unregister()</code>:</p>
<pre><code class="language-c">status = ml_pipeline_sink_unregister (sinkhandle);
</code></pre>
</li>
<li>
<p><strong>Valve</strong></p>
<p>This element is used to control the stream of a pipeline:</p>
<pre><code class="language-c">char pipeline[] = "videotestsrc is-live=true ! videoconvert ! videoscale ! video/x-raw,format=RGBx,width=16,height=16,framerate=10/1 ! tensor_converter ! valve name=valve1 ! fakesink";
int status = ml_pipeline_construct (pipeline, NULL, NULL, &handle);
</code></pre>
<p>By default, valve named <code>valve1</code> of the pipeline is opened. You can control the valve using <code>ml_pipeline_valve_h</code>:</p>
<pre><code class="language-c">ml_pipeline_h handle;
ml_pipeline_valve_h valve1;
status = ml_pipeline_valve_get_handle (handle, "valve1", &valve1);
</code></pre>
<p>After you start a pipeline, you can control the stream of the pipeline with a valve:</p>
<pre><code class="language-c">status = ml_pipeline_start (handle);
status = ml_pipeline_valve_set_open (valve1, false); /* Close */
</code></pre>
<p>You can also open the pipeline by controlling the stream of a pipeline with a valve:</p>
<pre><code class="language-c">status = ml_pipeline_valve_set_open (valve1, true); /* Open */
</code></pre>
<p>Before you destroy the pipeline, release <code>ml_pipeline_valve_h</code>:</p>
<pre><code class="language-c">status = ml_pipeline_valve_release_handle (valve1); /* Release valve handle */
</code></pre>
</li>
<li>
<p><strong>Switch</strong></p>
<p>The switch element is used when you need only one working branch from a pipeline that has multiple branches:</p>
<p><img src="Documentation/media/input-selector.png" alt="input-selector" id="inputselector"></p>
<pre><code class="language-c">char pipeline[] = "input-selector name=ins ! tensor_converter ! tensor_sink name=sinkx videotestsrc is-live=true ! videoconvert ! ins.sink_0 videotestsrc num-buffers=3 is-live=true ! videoconvert ! ins.sink_1";
</code></pre>
<p>Get <code>ml_pipeline_switch_h</code>. The name of the switch in this pipeline is <code>ins</code>:</p>
<pre><code class="language-c">ml_pipeline_h handle;
ml_pipeline_switch_h switchhandle;
ml_pipeline_switch_e type;
status = ml_pipeline_construct (pipeline, NULL, NULL, &handle);
status = ml_pipeline_switch_get_handle (handle, "ins", &type, &switchhandle);
</code></pre>
<p>You can control the switch using the handle <code>ml_pipeline_switch_h</code>:</p>
<pre><code class="language-c">status = ml_pipeline_switch_select (switchhandle, "sink_1");
</code></pre>
<p>Before you destroy the pipeline, release <code>ml_pipeline_switch_h</code>:</p>
<pre><code class="language-c">status = ml_pipeline_switch_release_handle (switchhandle);
</code></pre>
<p>The following image shows the switch at the end of the pipeline:</p>
<p><img src="Documentation/media/output-selector.png" alt="output-selector" id="outputselector"></p>
<pre><code class="language-c">char pipeline[] = "videotestsrc is-live=true ! videoconvert ! tensor_converter ! output-selector name=outs outs.src_0 ! tensor_sink name=sink0 async=false outs.src_1 ! tensor_sink name=sink1 async=false";
</code></pre>
</li>
</ul>
<h3 id="pipeline-states">Pipeline States</h3>
<p>For more information about the pipeline states, see <a href="https://gstreamer.freedesktop.org/documentation/plugin-development/basics/states.html">GStreamer guide</a>.</p>
<h2 id="tensors-information">Tensors Information</h2>
<p><code>ml_tensors_info_h</code> contains the information of tensors. The tensor info can be managed using the following functions:</p>
<ul>
<li>
<p><strong>Create and destroy</strong></p>
<pre><code class="language-c">ml_tensors_info_h info;
status = ml_tensors_info_create (&info);
status = ml_tensors_info_destroy (info);
</code></pre>
</li>
<li>
<p><strong>Set functions</strong></p>
<pre><code class="language-c">/* Set how many tensors exist */
status = ml_tensors_info_set_count (info, 1);
/* Set the type of the tensor_0 as UINT8 */
status = ml_tensors_info_set_tensor_type (info, 0, ML_TENSOR_TYPE_UINT8);
/* Set the dimension of the tensor_0 as in_dim */
status = ml_tensors_info_set_tensor_dimension (info, 0, in_dim);
/* Set the name of the tensor_0 as "tensor-name-test" */
status = ml_tensors_info_set_tensor_name (info, 0, "tensor-name-test");
</code></pre>
</li>
<li>
<p><strong>Get functions</strong></p>
<pre><code class="language-c">/* Get how many tensors exist */
status = ml_tensors_info_get_count (info, &num);
/* Get the type of the tensor_0 */
status = ml_tensors_info_get_tensor_type (info, 0, &out_type);
/* Get the dimension of the tensor_0 */
status = ml_tensors_info_get_tensor_dimension (info, 0, in_dim);
/* Get the name of the tensor_0 */
status = ml_tensors_info_get_tensor_name (info, 0, &out_name);
/* Get the size of the tensor_0 */
status = ml_tensors_info_get_tensor_size (info, 0, &data_size);
</code></pre>
</li>
</ul>
<h2 id="tensors-data">Tensors Data</h2>
<p><code>ml_tensors_data_h</code> contains the raw data of tensors. The tensor data can be managed using the following functions:</p>
<ul>
<li>
<p><strong>Create and destroy</strong></p>
<pre><code class="language-c">ml_tensors_data_h data;
ml_tensors_info_h info;
status = ml_tensors_data_create (info, &data);
status = ml_tensors_data_destroy (data);
</code></pre>
</li>
<li>
<p><strong>Get and set tensor data</strong></p>
<pre><code class="language-c">/* Get tensor data */
void *data_ptr;
size_t data_size;
status = ml_tensors_data_get_tensor_data (data, 0, &data_ptr, &data_size);
/* Set tensor data */
uint8_t dummy[4] = {1, 1, 1, 1};
status = ml_tensors_data_set_tensor_data (data, 0, dummy, 1);
</code></pre>
</li>
</ul>
<h2 id="related-information">Related Information</h2>
<ul>
<li>Dependencies
<ul>
<li>Tizen 5.5 and Higher for Mobile</li>
<li>Tizen 5.5 and Higher for Wearable</li>
</ul>
</li>
</ul>
</div>
</div>
<div id="search_results">
<p>The results of the search are</p>
</div>
<div id="footer">
</div>
</div>
<div id="toc-column">
<div class="edit-button">
</div>
<div id="toc-wrapper">
<nav id="toc"></nav>
</div>
</div>
</div>
</main>
<script src="assets/js/navbar_offset_scroller.js"></script>
</body>
</html>