GstInference/Example pipelines/IMX8
INPUT_LAYER='input'
OUTPUT_LAYER='Softmax'
</syntaxhighlight>
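Before launching, it can help to confirm that the GstInference elements are actually visible to GStreamer on the board (a quick check, assuming GstInference was installed into a path GStreamer scans for plugins; otherwise GST_PLUGIN_PATH needs to point at its install prefix):

<syntaxhighlight lang=bash>
# Confirm the inceptionv2 element is registered and list its properties
gst-inspect-1.0 inceptionv2
</syntaxhighlight>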
<syntaxhighlight lang=bash>
GST_DEBUG=inceptionv2:6 gst-launch-1.0 \
multifilesrc location=$IMAGE_FILE start-index=0 stop-index=0 loop=true  ! jpegparse ! jpegdec ! videoconvert ! videoscale ! videorate ! queue ! net.sink_model \
INPUT_LAYER='input'
OUTPUT_LAYER='Softmax'
</syntaxhighlight>
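Because this pipeline leaves decoding to decodebin, it is worth checking first that GStreamer can actually demux and decode $VIDEO_FILE; gst-discoverer-1.0 from the standard GStreamer tools prints the container, codecs and resolution (a quick sanity check independent of GstInference):

<syntaxhighlight lang=bash>
# Show container, codec and resolution information for the test video
gst-discoverer-1.0 $VIDEO_FILE
</syntaxhighlight>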
<syntaxhighlight lang=bash>
GST_DEBUG=inceptionv2:6 gst-launch-1.0 \
filesrc location=$VIDEO_FILE ! decodebin ! videoconvert ! videoscale ! queue ! net.sink_model \
INPUT_LAYER='input'
OUTPUT_LAYER='Softmax'
</syntaxhighlight>
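If the camera node is uncertain, a capture-only pipeline verifies that v4l2src delivers frames before inference is added on top (a minimal sanity test; the buffer count and fakesink are arbitrary choices for a short run):

<syntaxhighlight lang=bash>
# Grab a few hundred frames from the camera and discard them; failures here are camera/driver issues
gst-launch-1.0 v4l2src device=$CAMERA num-buffers=300 ! videoconvert ! fakesink
</syntaxhighlight>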
<syntaxhighlight lang=bash>
GST_DEBUG=inceptionv2:6 gst-launch-1.0 \
v4l2src device=$CAMERA ! videoconvert ! videoscale ! queue ! net.sink_model \
* You will need a v4l2 compatible camera (see the quick device check below)
* Pipeline
<syntaxhighlight lang=bash>
CAMERA='/dev/video0'
MODEL_LOCATION='graph_inceptionv2_tensorflow.pb'
INPUT_LAYER='input'
OUTPUT_LAYER='Softmax'
LABELS='imagenet_labels.txt'
</syntaxhighlight>
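To find out which /dev/video node belongs to the camera and whether it can deliver 1280x720, v4l2-ctl from the v4l-utils package (assuming it is available on the board) is a quick way to check:

<syntaxhighlight lang=bash>
# List V4L2 devices, then the formats and resolutions the chosen node supports
v4l2-ctl --list-devices
v4l2-ctl --device=$CAMERA --list-formats-ext
</syntaxhighlight>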
<syntaxhighlight lang=bash>
gst-launch-1.0 \
v4l2src device=$CAMERA ! "video/x-raw, width=1280, height=720" ! tee name=t \
INPUT_LAYER='input/Placeholder'
OUTPUT_LAYER='add_8'
</syntaxhighlight>
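The GST_DEBUG=tinyyolov2:6 prefix below enables verbose logging from the element, which is how these examples expose the detection output. To keep that output for later inspection instead of flooding the console, the standard GStreamer debug variables can be exported beforehand (generic GStreamer behaviour, not specific to GstInference); with these set, the inline GST_DEBUG prefix becomes redundant but harmless:

<syntaxhighlight lang=bash>
# Write the debug log to a file without ANSI color codes
export GST_DEBUG=tinyyolov2:6
export GST_DEBUG_FILE=/tmp/tinyyolov2.log
export GST_DEBUG_NO_COLOR=1
</syntaxhighlight>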
<syntaxhighlight lang=bash>
GST_DEBUG=tinyyolov2:6 gst-launch-1.0 \
multifilesrc location=$IMAGE_FILE start-index=0 stop-index=0 loop=true  ! jpegparse ! jpegdec ! videoconvert ! videoscale ! videorate ! queue ! net.sink_model \
INPUT_LAYER='input/Placeholder'
OUTPUT_LAYER='add_8'
</syntaxhighlight>
<syntaxhighlight lang=bash>
GST_DEBUG=tinyyolov2:6 gst-launch-1.0 \
filesrc location=$VIDEO_FILE ! decodebin ! videoconvert ! videoscale ! queue ! net.sink_model \
INPUT_LAYER='input/Placeholder'
OUTPUT_LAYER='add_8'
</syntaxhighlight>
<syntaxhighlight lang=bash>
GST_DEBUG=tinyyolov2:6 gst-launch-1.0 \
v4l2src device=$CAMERA ! videoconvert ! videoscale ! queue ! net.sink_model \
OUTPUT_LAYER='add_8'
LABELS='labels.txt'
</syntaxhighlight>
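Since $LABELS is only defined for the visualization cases, it is presumably consumed further down the pipeline for the on-screen class names; a quick existence check avoids a confusing failure at launch time (plain shell, nothing GStreamer specific):

<syntaxhighlight lang=bash>
# Warn early if the labels file is missing or empty; otherwise show how many classes it lists
[ -s "$LABELS" ] && wc -l "$LABELS" || echo "labels file '$LABELS' missing or empty" >&2
</syntaxhighlight>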
<syntaxhighlight lang=bash>
gst-launch-1.0 \
v4l2src device=$CAMERA ! "video/x-raw, width=1280, height=720" ! tee name=t \