
Commit 1267cf6

lamalexc, kp-zak, and Copilot authored
[ViPPET][SmartNVR] Update SmartNVR Pipeline (open-edge-platform#455)
Co-authored-by: Paweł Żak <pawel.zak@intel.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
1 parent 9184715 commit 1267cf6

File tree: 6 files changed, +720 −59 lines

tools/visual-pipeline-and-platform-evaluation-tool/app.py

Lines changed: 22 additions & 10 deletions
@@ -667,13 +667,13 @@ def create_interface(title: str = "Visual Pipeline and Platform Evaluation Tool"
 
         try:
             download_file(
-                "https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4",
-                "/tmp/person-bicycle-car-detection.mp4",
+                "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/video/people.mp4",
+                "/tmp/people.mp4",
             )
             input_video_player = gr.Video(
                 label="Input Video",
                 interactive=True,
-                value="/tmp/person-bicycle-car-detection.mp4",
+                value="/tmp/people.mp4",
                 sources="upload",
                 elem_id="input_video_player",
             )
@@ -716,7 +716,7 @@ def create_interface(title: str = "Visual Pipeline and Platform Evaluation Tool"
                 inferencing_channels = gr.Slider(
                     minimum=0,
                     maximum=30,
-                    value=11,
+                    value=8,
                     step=1,
                     label="Number of Recording + Inferencing channels",
                     interactive=True,
@@ -727,7 +727,7 @@ def create_interface(title: str = "Visual Pipeline and Platform Evaluation Tool"
                 recording_channels = gr.Slider(
                     minimum=0,
                     maximum=30,
-                    value=3,
+                    value=8,
                     step=1,
                     label="Number of Recording only channels",
                     interactive=True,
@@ -806,8 +806,8 @@ def create_interface(title: str = "Visual Pipeline and Platform Evaluation Tool"
                 # Object detection inference interval
                 object_detection_inference_interval = gr.Slider(
                     minimum=1,
-                    maximum=5,
-                    value=1,
+                    maximum=6,
+                    value=3,
                     step=1,
                     label="Object Detection Inference Interval",
                     interactive=True,
@@ -830,6 +830,7 @@ def create_interface(title: str = "Visual Pipeline and Platform Evaluation Tool"
                 object_classification_model = gr.Dropdown(
                     label="Object Classification Model",
                     choices=[
+                        "Disabled",
                         "EfficientNet B0 (INT8)" ,
                         "MobileNet V2 PyTorch (FP16)",
                         "ResNet-50 TF (INT8)",
@@ -841,7 +842,7 @@ def create_interface(title: str = "Visual Pipeline and Platform Evaluation Tool"
                 # Object classification device
                 object_classification_device = gr.Dropdown(
                     label="Object Classification Device",
-                    choices=device_choices,
+                    choices=device_choices + ["Disabled"],
                     value=preferred_device,
                     elem_id="object_classification_device",
                 )
@@ -860,8 +861,8 @@ def create_interface(title: str = "Visual Pipeline and Platform Evaluation Tool"
                 # Object classification inference interval
                 object_classification_inference_interval = gr.Slider(
                     minimum=1,
-                    maximum=5,
-                    value=1,
+                    maximum=6,
+                    value=3,
                     step=1,
                     label="Object Classification Inference Interval",
                     interactive=True,
@@ -890,6 +891,13 @@ def create_interface(title: str = "Visual Pipeline and Platform Evaluation Tool"
                     elem_id="object_classification_reclassify_interval",
                 )
 
+                pipeline_watermark_enabled = gr.Checkbox(
+                    label="Overlay inference results on inference channels",
+                    value=True,
+                    elem_id="pipeline_watermark_enabled",
+                )
+
                 # Run button
                 run_button = gr.Button("Run")
 
@@ -934,6 +942,7 @@ def create_interface(title: str = "Visual Pipeline and Platform Evaluation Tool"
         components.add(object_classification_inference_interval)
         components.add(object_classification_nireq)
         components.add(object_classification_reclassify_interval)
+        components.add(pipeline_watermark_enabled)
 
         # Interface layout
         with gr.Blocks(theme=theme, css=css_code, title=title) as demo:
@@ -1293,6 +1302,9 @@ def create_interface(title: str = "Visual Pipeline and Platform Evaluation Tool"
                     # Recording Channels
                     recording_channels.render()
 
+                    # Whether to overlay result with watermarks
+                    pipeline_watermark_enabled.render()
+
                     # Benchmark Parameters Accordion
                     with gr.Accordion("Platform Ceiling Analysis Parameters", open=False):
 

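The new `pipeline_watermark_enabled` checkbox follows the app's existing control pattern: declare the component, add it to the `components` set so its value reaches the pipeline parameters, then `render()` it inside the layout. A minimal self-contained sketch of that pattern (the `build_fragment` handler and the layout below are hypothetical stand-ins, not the app's actual wiring):

```python
import gradio as gr

# Hypothetical stand-in for the real run handler: the app forwards the
# checkbox value to the pipeline's evaluate(); here we only echo the
# GStreamer element it toggles.
def build_fragment(watermark_enabled: bool) -> str:
    return "gvawatermark ! " if watermark_enabled else "(watermark disabled)"

with gr.Blocks() as demo:
    pipeline_watermark_enabled = gr.Checkbox(
        label="Overlay inference results on inference channels",
        value=True,
        elem_id="pipeline_watermark_enabled",
    )
    run_button = gr.Button("Run")
    result = gr.Textbox(label="Pipeline fragment")
    run_button.click(
        build_fragment,
        inputs=pipeline_watermark_enabled,
        outputs=result,
    )

if __name__ == "__main__":
    demo.launch()
```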
tools/visual-pipeline-and-platform-evaluation-tool/pipelines/smartnvr/diagram.drawio.svg

Lines changed: 469 additions & 1 deletion
SVG pipeline diagram updated (35 KB); preview not rendered in this view.

tools/visual-pipeline-and-platform-evaluation-tool/pipelines/smartnvr/pipeline.py

Lines changed: 82 additions & 43 deletions
@@ -28,43 +28,40 @@ def __init__(self):
             "h264parse ! "
             "mp4mux ! "
             "filesink "
-            " location={VIDEO_OUTPUT_PATH} "
+            " location={VIDEO_OUTPUT_PATH} async=false "
         )
 
+
         self._recording_stream = (
             "filesrc "
             " location={VIDEO_PATH} ! "
             "qtdemux ! "
             "h264parse ! "
             "tee name=t{id} ! "
-            "queue2 max-size-bytes=0 max-size-time=0 ! "
+            "queue2 ! "
             "mp4mux ! "
             "filesink "
             " location=/tmp/stream{id}.mp4 "
             "t{id}. ! "
-            "queue2 max-size-bytes=0 max-size-time=0 ! "
+            "queue2 ! "
             "{decoder} ! "
-            "gvafpscounter starting-frame=1000 ! "
-            "queue2 max-size-bytes=0 max-size-time=0 ! "
-            "{postprocessing} ! "
-            "video/x-raw,width=640,height=360 ! "
-            "comp.sink_{id} "
+            "gvafpscounter starting-frame=500 ! "
         )
 
-        self._inference_stream = (
+        self._inference_stream_decode_detect_track = (
             "filesrc "
             " location={VIDEO_PATH} ! "
             "qtdemux ! "
             "h264parse ! "
             "tee name=t{id} ! "
-            "queue2 max-size-bytes=0 max-size-time=0 ! "
+            "queue2 ! "
             "mp4mux ! "
             "filesink "
             " location=/tmp/stream{id}.mp4 "
             "t{id}. ! "
-            "queue2 max-size-bytes=0 max-size-time=0 ! "
+            "queue2 ! "
             "{decoder} ! "
-            "gvafpscounter starting-frame=1000 ! "
+            "gvafpscounter starting-frame=500 ! "
             "gvadetect "
             " {detection_model_config} "
             " model-instance-id=detect0 "
@@ -73,16 +70,13 @@ def __init__(self):
             " batch-size={object_detection_batch_size} "
             " inference-interval={object_detection_inference_interval} "
             " nireq={object_detection_nireq} ! "
-            "queue2 "
-            " max-size-buffers=0 "
-            " max-size-bytes=0 "
-            " max-size-time=0 ! "
+            "queue2 ! "
             "gvatrack "
             " tracking-type=short-term-imageless ! "
-            "queue2 "
-            " max-size-buffers=0 "
-            " max-size-bytes=0 "
-            " max-size-time=0 ! "
+            "queue2 ! "
+        )
+
+        self._inference_stream_classify = (
             "gvaclassify "
             " {classification_model_config} "
             " model-instance-id=classify0 "
@@ -92,19 +86,24 @@ def __init__(self):
             " inference-interval={object_classification_inference_interval} "
             " nireq={object_classification_nireq} "
             " reclassify-interval={object_classification_reclassify_interval} ! "
-            "queue2 "
-            " max-size-buffers=0 "
-            " max-size-bytes=0 "
-            " max-size-time=0 ! "
-            "gvawatermark ! "
+            "queue2 ! "
+        )
+
+        self._inference_stream_metadata_processing = (
             "gvametaconvert "
             " format=json "
             " json-indent=4 "
             " source={VIDEO_PATH} ! "
             "gvametapublish "
             " method=file "
             " file-path=/dev/null ! "
-            "queue2 max-size-bytes=0 max-size-time=0 ! "
+        )
+
+        self._sink_to_compositor = (
+            "queue2 "
+            " max-size-buffers={max_size_buffers} "
+            " max-size-bytes=0 "
+            " max-size-time=0 ! "
             "{postprocessing} ! "
             "video/x-raw,width=640,height=360 ! "
             "comp.sink_{id} "
@@ -240,18 +239,15 @@ def evaluate(
             ),
         )
 
-        # Create the compositor
-        compositor = self._compositor.format(
-            **constants,
-            sinks=sinks,
-            encoder=_encoder_element,
-            compositor=_compositor_element,
-        )
+
 
         # Create the streams
         streams = ""
 
+        # Handle inference channels
         for i in range(inference_channels):
+
+            # Handle object detection parameters and constants
             detection_model_config = (
                 f"model={constants["OBJECT_DETECTION_MODEL_PATH"]} "
                 f"model-proc={constants["OBJECT_DETECTION_MODEL_PROC"]} "
@@ -262,26 +258,54 @@ def evaluate(
                 f"model={constants["OBJECT_DETECTION_MODEL_PATH"]} "
             )
 
-            classification_model_config = (
-                f"model={constants["OBJECT_CLASSIFICATION_MODEL_PATH"]} "
-                f"model-proc={constants["OBJECT_CLASSIFICATION_MODEL_PROC"]} "
+            streams += self._inference_stream_decode_detect_track.format(
+                **parameters,
+                **constants,
+                id=i,
+                decoder=_decoder_element,
+                detection_model_config=detection_model_config,
             )
 
-            if not constants["OBJECT_CLASSIFICATION_MODEL_PROC"]:
+            # Handle object classification parameters and constants
+            # Do this only if the object classification model is not disabled or the device is not disabled
+            if not (constants["OBJECT_CLASSIFICATION_MODEL_PATH"] == "Disabled"
+                    or parameters["object_classification_device"] == "Disabled") :
                 classification_model_config = (
                     f"model={constants["OBJECT_CLASSIFICATION_MODEL_PATH"]} "
+                    f"model-proc={constants["OBJECT_CLASSIFICATION_MODEL_PROC"]} "
                 )
 
-            streams += self._inference_stream.format(
+                if not constants["OBJECT_CLASSIFICATION_MODEL_PROC"]:
+                    classification_model_config = (
+                        f"model={constants["OBJECT_CLASSIFICATION_MODEL_PATH"]} "
+                    )
+
+                streams += self._inference_stream_classify.format(
+                    **parameters,
+                    **constants,
+                    id=i,
+                    classification_model_config=classification_model_config,
+                )
+
+            # Overlay inference results on the inferenced video if enabled
+            if parameters["pipeline_watermark_enabled"]:
+                streams += "gvawatermark ! "
+
+            streams += self._inference_stream_metadata_processing.format(
                 **parameters,
                 **constants,
                 id=i,
-                decoder=_decoder_element,
-                postprocessing=_postprocessing_element,
-                detection_model_config=detection_model_config,
-                classification_model_config=classification_model_config,
             )
 
+            # sink to compositor or fake sink depending on the compose flag
+            streams += self._sink_to_compositor.format(
+                **parameters,
+                **constants,
+                id=i,
+                postprocessing=_postprocessing_element,
+                max_size_buffers=0,
+            )
+        # Handle regular channels
         for i in range(inference_channels, channels):
             streams += self._recording_stream.format(
                 **parameters,
@@ -290,6 +314,21 @@ def evaluate(
                 decoder=_decoder_element,
                 postprocessing=_postprocessing_element,
             )
+            # sink to compositor or fake sink depending on the compose flag
+            streams += self._sink_to_compositor.format(
+                **parameters,
+                **constants,
+                id=i,
+                postprocessing=_postprocessing_element,
+                max_size_buffers=1,
+            )
+        # Prepend the compositor
+        streams = self._compositor.format(
+            **constants,
+            sinks=sinks,
+            encoder=_encoder_element,
+            compositor=_compositor_element,
+        ) + streams
 
         # Evaluate the pipeline
-        return "gst-launch-1.0 -q " + compositor + " " + streams
+        return "gst-launch-1.0 -q " + streams

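Two of the quieter changes are behavioral rather than cosmetic. `async=false` on the compositor's `filesink` stops that sink from taking part in preroll, so the pipeline can reach PLAYING before the output file receives data. And the bare `queue2 !` elements now run with queue2's built-in buffering limits instead of explicitly disabling the byte and time caps. With PyGObject and GStreamer installed, those defaults can be printed directly (plain GStreamer introspection, not code from this commit):

```python
# Print queue2's default buffering limits, i.e. what the simplified
# "queue2 !" elements in this commit now fall back to.
import gi

gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)
queue = Gst.ElementFactory.make("queue2", None)
for prop in ("max-size-buffers", "max-size-bytes", "max-size-time"):
    print(f"{prop} = {queue.get_property(prop)}")
```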