Investigating why the input to the model is incorrect, resulting in an error on initialization.
This commit is contained in:
+1
-1
@@ -1,5 +1,5 @@
|
||||
import 'package:flutter/material.dart';
|
||||
import 'package:tensordex_mobile/ui/home.dart';
|
||||
import 'package:tensordex_mobile/ui/tensordex_home.dart';
|
||||
import 'package:tensordex_mobile/utils/logger.dart';
|
||||
|
||||
Future<void> main() async {
|
||||
|
||||
@@ -0,0 +1,160 @@
|
||||
import 'dart:math';
|
||||
import 'dart:ui';
|
||||
|
||||
import 'package:collection/collection.dart';
|
||||
import 'package:image/image.dart' as image_lib;
|
||||
import 'package:tflite_flutter/tflite_flutter.dart';
|
||||
import 'package:tflite_flutter_helper/tflite_flutter_helper.dart';
|
||||
|
||||
import '../utils/logger.dart';
|
||||
import '../utils/recognition.dart';
|
||||
import '../utils/stats.dart';
|
||||
|
||||
/// Classifier
|
||||
/// Classifier
///
/// Wraps a TFLite [Interpreter] together with the image pre-processing and
/// score post-processing needed to classify an image and return the
/// top-scoring label.
class Classifier {
  /// Default model asset name (used only when no [Interpreter] is injected).
  static const String MODEL_FILE_NAME = "detect.tflite";

  /// Default label asset name.
  // NOTE(review): loadLabels() actually loads "assets/labels.txt", not this
  // constant — confirm which asset is intended before relying on it.
  static const String LABEL_FILE_NAME = "labelmap.txt";

  /// Input size of image (height = width = 224).
  static const int INPUT_SIZE = 224;

  /// Result score threshold.
  static const double THRESHOLD = 0.5;

  /// [ImageProcessor] used to pre-process the image (built lazily in
  /// [getProcessedImage]).
  ImageProcessor? imageProcessor;

  /// Instance of Interpreter; assigned by [loadModel].
  late Interpreter _interpreter;

  /// Output buffer sized from the model's output tensor.
  late TensorBuffer _outputBuffer;

  /// Post-processor applied to the raw output scores.
  late TensorProcessor _probabilityProcessor;

  /// Labels file loaded as list; assigned by [loadLabels].
  late List<String> _labels;

  /// Number of results to show.
  static const int NUM_RESULTS = 10;

  // Initialization happens asynchronously after the constructor returns;
  // these flags let predict() bail out instead of touching uninitialized
  // `late` fields (which would throw).
  bool _modelLoaded = false;
  bool _labelsLoaded = false;

  Classifier({
    Interpreter? interpreter,
    List<String>? labels,
  }) {
    // Fire-and-forget async loads; predict() checks the *_loaded flags.
    loadModel(interpreter: interpreter);
    loadLabels(labels: labels);
  }

  /// Loads the interpreter from asset (or adopts the injected [interpreter])
  /// and prepares the output buffer and probability post-processor.
  Future<void> loadModel({Interpreter? interpreter}) async {
    try {
      _interpreter = interpreter ??
          await Interpreter.fromAsset(
            MODEL_FILE_NAME,
            options: InterpreterOptions()..threads = 4,
          );

      final outputTensor = _interpreter.getOutputTensor(0);
      _outputBuffer =
          TensorBuffer.createFixedSize(outputTensor.shape, outputTensor.type);
      // Identity normalization (x - 0) / 1: scores pass through unchanged.
      _probabilityProcessor =
          TensorProcessorBuilder().add(NormalizeOp(0, 1)).build();
      _modelLoaded = true;
    } catch (e) {
      logger.e("Error while creating interpreter: ", e);
    }
  }

  /// Loads labels from assets (or adopts the injected [labels]).
  Future<void> loadLabels({List<String>? labels}) async {
    try {
      _labels = labels ?? await FileUtil.loadLabels("assets/labels.txt");
      _labelsLoaded = true;
    } catch (e) {
      logger.e("Error while loading labels: $e");
    }
  }

  /// Pre-processes [inputImage]: bilinear resize to INPUT_SIZE x INPUT_SIZE,
  /// then normalize pixel values from [0, 255] to [-1, 1] via
  /// (x - 127.5) / 127.5.
  TensorImage? getProcessedImage(TensorImage inputImage) {
    imageProcessor ??= ImageProcessorBuilder()
        .add(ResizeOp(INPUT_SIZE, INPUT_SIZE, ResizeMethod.BILINEAR))
        .add(NormalizeOp(127.5, 127.5))
        .build();
    return imageProcessor?.process(inputImage);
  }

  /// Runs classification on [image].
  ///
  /// Returns a map with the top [Recognition] under "recognitions" and timing
  /// [Stats] under "stats", or null when the model/labels have not finished
  /// loading or pre-processing fails.
  Map<String, dynamic>? predict(image_lib.Image image) {
    final predictStartTime = DateTime.now().millisecondsSinceEpoch;

    // The original `_interpreter == null` check could never be true (the
    // field is non-nullable) and reading an unassigned `late` field throws —
    // which is exactly the initialization error under investigation.
    if (!_modelLoaded || !_labelsLoaded) {
      logger.e("Interpreter or labels not initialized");
      return null;
    }

    final procImage = getProcessedImage(TensorImage.fromImage(image));
    if (procImage == null) {
      return null;
    }

    _interpreter.run(procImage.buffer, _outputBuffer.getBuffer());

    final Map<String, double> labeledProb = TensorLabel.fromList(
            labels, _probabilityProcessor.process(_outputBuffer))
        .getMapWithFloatValue();
    final pred = getTopProbability(labeledProb);
    final rec = Recognition(1, pred.key, pred.value);

    final predictElapsedTime =
        DateTime.now().millisecondsSinceEpoch - predictStartTime;
    // NOTE(review): all four Stats slots receive the total elapsed time;
    // per-phase timings were never wired up — confirm intended.
    return {
      "recognitions": rec,
      "stats": Stats(predictElapsedTime, predictElapsedTime,
          predictElapsedTime, predictElapsedTime),
    };
  }

  /// Gets the interpreter instance.
  Interpreter get interpreter => _interpreter;

  /// Gets the loaded labels.
  List<String> get labels => _labels;
}
|
||||
|
||||
/// Returns the entry with the highest probability in [labeledProb].
///
/// Single O(n) pass instead of building a full PriorityQueue just to read
/// its head (O(n log n)). On ties the earliest entry in iteration order
/// wins. Throws [StateError] if the map is empty (same as the queue-based
/// version).
MapEntry<String, double> getTopProbability(Map<String, double> labeledProb) {
  return labeledProb.entries
      .reduce((best, e) => e.value > best.value ? e : best);
}
|
||||
|
||||
/// Comparator ordering [MapEntry]s by *descending* probability value:
/// returns -1 when [e1] outranks [e2], 1 when it ranks below, 0 on a tie.
int compare(MapEntry<String, double> e1, MapEntry<String, double> e2) {
  if (e1.value == e2.value) {
    return 0;
  }
  return e1.value > e2.value ? -1 : 1;
}
|
||||
+36
-1
@@ -2,6 +2,9 @@ import 'dart:isolate';
|
||||
|
||||
import 'package:camera/camera.dart';
|
||||
import 'package:flutter/material.dart';
|
||||
import 'package:tensordex_mobile/tflite/classifier.dart';
|
||||
import 'package:tflite_flutter/tflite_flutter.dart';
|
||||
import 'package:tensordex_mobile/utils/image_utils.dart';
|
||||
|
||||
import '../utils/logger.dart';
|
||||
import '../utils/recognition.dart';
|
||||
@@ -30,10 +33,13 @@ class _CameraViewState extends State<CameraView> with WidgetsBindingObserver {
|
||||
|
||||
/// Controller
|
||||
late CameraController cameraController;
|
||||
Interpreter? interp;
|
||||
|
||||
/// true when inference is ongoing
|
||||
bool predicting = false;
|
||||
|
||||
late Classifier classy;
|
||||
|
||||
// /// Instance of [Classifier]
|
||||
// Classifier classifier;
|
||||
//
|
||||
@@ -56,9 +62,28 @@ class _CameraViewState extends State<CameraView> with WidgetsBindingObserver {
|
||||
// Camera initialization
|
||||
initializeCamera();
|
||||
|
||||
// final gpuDelegateV2 = GpuDelegateV2(
|
||||
// options: GpuDelegateOptionsV2(
|
||||
// isPrecisionLossAllowed: false,
|
||||
// inferencePreference: TfLiteGpuInferenceUsage.fastSingleAnswer,
|
||||
// inferencePriority1: TfLiteGpuInferencePriority.minLatency,
|
||||
// inferencePriority2: TfLiteGpuInferencePriority.auto,
|
||||
// inferencePriority3: TfLiteGpuInferencePriority.auto,
|
||||
// ));
|
||||
|
||||
|
||||
logger.e("CREATING THE INTERPRETOR");
|
||||
var interpreterOptions = InterpreterOptions();//..addDelegate(gpuDelegateV2);
|
||||
interp = await Interpreter.fromAsset('efficientnet_v2s.tflite',
|
||||
options: interpreterOptions);
|
||||
logger.e("CREATING THE INTERPRETOR");
|
||||
|
||||
classy = Classifier(interpreter: interp);
|
||||
logger.i(interp?.getOutputTensors());
|
||||
// Create an instance of classifier to load model and labels
|
||||
// classifier = Classifier();
|
||||
|
||||
|
||||
// Initially predicting = false
|
||||
predicting = false;
|
||||
}
|
||||
@@ -94,7 +119,7 @@ class _CameraViewState extends State<CameraView> with WidgetsBindingObserver {
|
||||
@override
|
||||
Widget build(BuildContext context) {
|
||||
// Return empty container while the camera is not initialized
|
||||
if (!cameraController.value.isInitialized || cameraController == null) {
|
||||
if (!cameraController.value.isInitialized) {
|
||||
return Container();
|
||||
}
|
||||
|
||||
@@ -114,6 +139,16 @@ class _CameraViewState extends State<CameraView> with WidgetsBindingObserver {
|
||||
predicting = true;
|
||||
});
|
||||
logger.i("RECIEVED IMAGE");
|
||||
logger.i(cameraImage.format.group);
|
||||
logger.i(cameraImage);
|
||||
var converted = ImageUtils.convertCameraImage(cameraImage);
|
||||
if (converted != null){
|
||||
|
||||
var result = classy.predict(converted);
|
||||
|
||||
logger.e("PREDICTED IMAGE");
|
||||
logger.i(result);
|
||||
}
|
||||
// logger.i(cameraImage);
|
||||
// logger.i(cameraImage.height);
|
||||
// logger.i(cameraImage.width);
|
||||
|
||||
@@ -0,0 +1,33 @@
|
||||
import 'package:flutter/material.dart';
|
||||
import 'package:tensordex_mobile/ui/poke_view.dart';
|
||||
import 'package:tensordex_mobile/utils/recognition.dart';
|
||||
|
||||
import '../utils/logger.dart';
|
||||
|
||||
/// Widget intended to display classification results.
// NOTE(review): the previous doc comment ("[CameraView] sends each frame for
// inference") was copied from CameraView — this class only receives results.
class ResultsView extends StatefulWidget {

  /// Constructor
  const ResultsView({Key? key}) : super(key: key);

  // NOTE(review): stub — only logs; [results] is neither stored nor rendered,
  // and calling a method on the widget cannot trigger a rebuild of its State.
  // Presumably results should flow through the State (e.g. via setState) —
  // confirm intended wiring.
  void setResults(Recognition results){
    logger.i("RESULTS IN THE RESULT VIEW");
  }

  @override
  State<ResultsView> createState() => _ResultsViewState();
}
|
||||
|
||||
/// State for [ResultsView].
///
/// Currently renders placeholder text only; no result data is displayed yet.
class _ResultsViewState extends State<ResultsView> {
  // The redundant initState override (it only called super.initState()) was
  // removed — the inherited default implementation is sufficient.

  @override
  Widget build(BuildContext context) {
    return Text("data");
  }
}
|
||||
@@ -1,6 +1,6 @@
|
||||
import 'package:flutter/material.dart';
|
||||
import 'package:camera/camera.dart';
|
||||
import 'package:tensordex_mobile/ui/poke_view.dart';
|
||||
import 'package:tensordex_mobile/ui/results_view.dart';
|
||||
|
||||
import '../utils/logger.dart';
|
||||
import '../utils/recognition.dart';
|
||||
@@ -25,7 +25,6 @@ class TensordexHome extends StatefulWidget {
|
||||
}
|
||||
|
||||
class _TensordexHomeState extends State<TensordexHome> {
|
||||
int _counter = 0;
|
||||
|
||||
/// Results to draw bounding boxes
|
||||
List<Recognition>? results;
|
||||
@@ -38,7 +37,6 @@ class _TensordexHomeState extends State<TensordexHome> {
|
||||
|
||||
void _incrementCounter() {
|
||||
setState(() {
|
||||
_counter++;
|
||||
logger.d("Counter Incremented!");
|
||||
logger.w("Counter Incremented!");
|
||||
logger.e("Counter Incremented!");
|
||||
@@ -129,8 +127,6 @@ class _TensordexHomeState extends State<TensordexHome> {
|
||||
|
||||
@override
|
||||
void dispose() {
|
||||
// controller.dispose();
|
||||
// WidgetsBinding.instance.removeObserver(this);
|
||||
super.dispose();
|
||||
}
|
||||
|
||||
@@ -158,17 +154,10 @@ class _TensordexHomeState extends State<TensordexHome> {
|
||||
child: Column(
|
||||
mainAxisAlignment: MainAxisAlignment.center,
|
||||
children: <Widget>[
|
||||
const Text(
|
||||
'You have pushed the button this many times:',
|
||||
),
|
||||
Text(
|
||||
'$_counter',
|
||||
style: Theme.of(context).textTheme.headline4,
|
||||
),
|
||||
CameraView(
|
||||
resultsCallback: resultsCallback,
|
||||
statsCallback: statsCallback
|
||||
),
|
||||
statsCallback: statsCallback),
|
||||
const ResultsView(),
|
||||
],
|
||||
),
|
||||
),
|
||||
Reference in New Issue
Block a user