GstInference/Example pipelines with hierarchical metadata: Difference between revisions

From RidgeRun Developer Wiki
mNo edit summary
No edit summary
Line 306: Line 306:
   tinyyolov3: "output_boxes",
   tinyyolov3: "output_boxes",
   facenetv1: "output"
   facenetv1: "output"
};
/*
 * Lookup table: model architecture id -> TensorFlow graph file name.
 * NOTE(review): the resnet50v1 entry maps to "graph_resnetv1_tensorflow.pb"
 * (no "50" in the file name) — confirm this matches the shipped model file.
 */
var model_names = {
  "inceptionv1": "graph_inceptionv1_tensorflow.pb",
  "inceptionv2": "graph_inceptionv2_tensorflow.pb",
  "inceptionv3": "graph_inceptionv3_tensorflow.pb",
  "inceptionv4": "graph_inceptionv4_tensorflow.pb",
  "mobilenetv2": "graph_mobilenetv2_tensorflow.pb",
  "resnet50v1": "graph_resnetv1_tensorflow.pb",
  "tinyyolov2": "graph_tinyyolov2_tensorflow.pb",
  "tinyyolov3": "graph_tinyyolov3_tensorflow.pb",
  "facenetv1": "graph_facenetv1_tensorflow.pb"
};
};


Line 424: Line 412:
       disable_element("outputlayer");
       disable_element("outputlayer");
       break;
       break;
  }
  if (document.getElementById("model").value != "") {
    model_selection();
   }
   }
}
}


/**
 * Fills in the model-dependent form fields for the currently selected
 * model/backend pair.
 *
 * Reads the "model" and "backend" <select> values and writes:
 *   - #model_location: backend-specific graph file name
 *     ("graph_<model>_tensorflow.pb", "graph_<model>_tflite.pb",
 *      "graph_<model>_ncsdk", or "" for an unknown backend)
 *   - #labels: looked up in the page-level label_files table
 *   - #inputlayer / #outputlayer: only for the "tensorflow" backend,
 *     from the page-level input_layers / output_layers tables
 *     (other backends leave these fields untouched)
 *
 * NOTE(review): this block was reconstructed from an interleaved wiki
 * revision diff; the duplicated old/new lines were collapsed into the
 * final-revision behavior.
 */
function model_selection() {
  // Hoist the repeated DOM lookup — the selected model id is used by
  // every branch below.
  var model = document.getElementById("model").value;
  var tmp_backend = document.getElementById("backend").value;
  var tmp_model_location = "";
  switch (tmp_backend) {
    case "tensorflow":
      // Only the TensorFlow graphs need explicit input/output layer names.
      document.getElementById("inputlayer").value = input_layers[model];
      document.getElementById("outputlayer").value = output_layers[model];
      tmp_model_location = "graph_" + model + "_tensorflow.pb";
      break;
    case "tflite":
      tmp_model_location = "graph_" + model + "_tflite.pb";
      break;
    case "ncsdk":
      tmp_model_location = "graph_" + model + "_ncsdk";
      break;
    default:
      tmp_model_location = "";
      break;
  }
  document.getElementById("model_location").value = tmp_model_location;
  document.getElementById("labels").value = label_files[model];
}


Line 452: Line 457:
}
}


// TODO: modify for platform
function tee_selection() {
function tee_selection() {
   if (model != "") {
   if (model != "") {

Revision as of 16:04, 17 March 2020




Previous: Example pipelines/IMX8 Index Next: Example Applications




Sample pipelines

The following section contains a tool for generating simple GStreamer pipelines with one model of a selected architecture using our hierarchical inference metadata. If you are using an older version, you can check the legacy pipelines section. Please make sure to check the documentation to understand the property usage for each element.

The required elements are:

  • Backend
  • Model
  • Model location
  • Labels
  • Source
  • Sink

The optional elements include:

  • inferencefilter
  • inferencecrop
  • inferenceoverlay
Detection with new metadata

Pipeline generator

The following tool will provide simple pipelines according to the selected elements.

Optional utilities

The following elements are optional yet very useful. Check the documentation for more details on their properties.


Advanced pipelines

Previous: Example pipelines/IMX8 Index Next: Example Applications