fixed the classifier and added a preliminary results view that shows which pokemon are currently being detected.

Lucas Oskorep
2022-06-22 21:44:15 -04:00
parent ebfbfb503d
commit 9ec737db46
10 changed files with 305 additions and 355 deletions
+57 -80
@@ -1,42 +1,35 @@
import 'dart:math';
import 'dart:ui';
import 'package:collection/collection.dart';
import 'package:image/image.dart' as image_lib;
import 'package:tflite_flutter/tflite_flutter.dart';
import 'package:tflite_flutter_helper/tflite_flutter_helper.dart';
import '../utils/logger.dart';
import '../utils/recognition.dart';
import '../utils/stats.dart';
import 'data/recognition.dart';
import 'data/stats.dart';
/// Classifier
class Classifier {
static const String MODEL_FILE_NAME = "detect.tflite";
static const String LABEL_FILE_NAME = "labelmap.txt";
/// Input size of image (height = width = 300)
static const int INPUT_SIZE = 224;
/// Result score threshold
static const double THRESHOLD = 0.5;
static const String modelFileName = "efficientnet_v2s.tflite";
static const int inputSize = 224;
/// [ImageProcessor] used to pre-process the image
ImageProcessor? imageProcessor;
/// Padding the image to transform into square
// int padSize = 0;
///Tensor image to move image data into
late TensorImage _inputImage;
/// Instance of Interpreter
late Interpreter _interpreter;
late TensorBuffer _outputBuffer;
late var _probabilityProcessor;
late TfLiteType _inputType;
late TfLiteType _outputType;
late SequentialProcessor<TensorBuffer> _outputProcessor;
/// Labels file loaded as list
late List<String> _labels;
int classifierCreationStart = -1;
/// Number of results to show
static const int NUM_RESULTS = 10;
Classifier({
Interpreter? interpreter,
@@ -51,19 +44,18 @@ class Classifier {
try {
_interpreter = interpreter ??
await Interpreter.fromAsset(
MODEL_FILE_NAME,
options: InterpreterOptions()..threads = 4,
modelFileName,
options: InterpreterOptions()..threads = 8,
);
var outputTensor = _interpreter.getOutputTensor(0);
var outputShape = outputTensor.shape;
var outputType = outputTensor.type;
_outputType = outputTensor.type;
var inputTensor = _interpreter.getInputTensor(0);
var intputShape = inputTensor.shape;
var intputType = inputTensor.type;
_outputBuffer = TensorBuffer.createFixedSize(outputShape, outputType);
_probabilityProcessor =
// var inputShape = inputTensor.shape;
_inputType = inputTensor.type;
_inputImage = TensorImage(_inputType);
_outputBuffer = TensorBuffer.createFixedSize(outputShape, _outputType);
_outputProcessor =
TensorProcessorBuilder().add(NormalizeOp(0, 1)).build();
} catch (e) {
logger.e("Error while creating interpreter: ", e);
@@ -80,61 +72,45 @@ class Classifier {
}
/// Pre-process the image
TensorImage? getProcessedImage(TensorImage inputImage) {
TensorImage? getProcessedImage(TensorImage? inputImage) {
// padSize = max(inputImage.height, inputImage.width);
imageProcessor ??= ImageProcessorBuilder()
// .add(ResizeWithCropOrPadOp(padSize, padSize))
.add(ResizeOp(INPUT_SIZE, INPUT_SIZE, ResizeMethod.BILINEAR))
.add(NormalizeOp(127.5, 127.5))
.build();
return imageProcessor?.process(inputImage);
if (inputImage != null) {
imageProcessor ??= ImageProcessorBuilder()
.add(ResizeWithCropOrPadOp(inputSize, inputSize))
.add(ResizeOp(inputSize, inputSize, ResizeMethod.BILINEAR))
// .add(NormalizeOp(127.5, 127.5))
.build();
return imageProcessor?.process(inputImage);
}
return null;
}
/// Runs image classification on the input image
Map<String, dynamic>? predict(image_lib.Image image) {
logger.i(labels);
var predictStartTime = DateTime.now().millisecondsSinceEpoch;
if (_interpreter == null) {
logger.e("Interpreter not initialized");
return null;
}
var preProcessStart = DateTime.now().millisecondsSinceEpoch;
// Create TensorImage from image
// Pre-process TensorImage
var procImage = getProcessedImage(TensorImage.fromImage(image));
var preProcessElapsedTime =
DateTime.now().millisecondsSinceEpoch - preProcessStart;
if (procImage != null) {
var inferenceTimeStart = DateTime.now().millisecondsSinceEpoch;
// run inference
var inferenceTimeElapsed =
DateTime.now().millisecondsSinceEpoch - inferenceTimeStart;
logger.i("Sending image to ML");
logger.i(procImage.buffer.asFloat32List());
logger.i(procImage.width);
logger.i(procImage.height);
logger.i(procImage.tensorBuffer.shape);
logger.i(procImage.tensorBuffer.isDynamic);
_interpreter.run(procImage.buffer, _outputBuffer.getBuffer());
Map<String, double> labeledProb = TensorLabel.fromList(
labels, _probabilityProcessor.process(_outputBuffer))
.getMapWithFloatValue();
final pred = getTopProbability(labeledProb);
Recognition rec = Recognition(1, pred.key, pred.value);
var predictElapsedTime = DateTime.now().millisecondsSinceEpoch - predictStartTime;
return {
"recognitions": rec,
"stats": Stats(predictElapsedTime, predictElapsedTime, predictElapsedTime, predictElapsedTime),
};
} else {
return null;
}
var preProcStart = DateTime.now().millisecondsSinceEpoch;
_inputImage.loadImage(image);
_inputImage = getProcessedImage(_inputImage)!;
var inferenceStart = DateTime.now().millisecondsSinceEpoch;
_interpreter.run(_inputImage.buffer, _outputBuffer.getBuffer());
var postProcStart = DateTime.now().millisecondsSinceEpoch;
Map<String, double> labeledProb = TensorLabel.fromList(
labels, _outputProcessor.process(_outputBuffer))
.getMapWithFloatValue();
final predictions = getTopProbabilities(labeledProb, number: 5)
.mapIndexed(
(index, element) => Recognition(index, element.key, element.value))
.toList();
var endTime = DateTime.now().millisecondsSinceEpoch;
return {
"recognitions": predictions,
"stats": Stats(
totalTime: endTime - preProcStart,
preProcessingTime: inferenceStart - preProcStart,
inferenceTime: postProcStart - inferenceStart,
postProcessingTime: endTime - postProcStart,
),
};
}
/// Gets the interpreter instance
Interpreter get interpreter => _interpreter;
@@ -142,11 +118,12 @@ class Classifier {
List<String> get labels => _labels;
}
MapEntry<String, double> getTopProbability(Map<String, double> labeledProb) {
List<MapEntry<String, double>> getTopProbabilities(
Map<String, double> labeledProb,
{int number = 3}) {
var pq = PriorityQueue<MapEntry<String, double>>(compare);
pq.addAll(labeledProb.entries);
return pq.first;
return [for (var i = 0; i < number; i += 1) pq.removeFirst()];
}
int compare(MapEntry<String, double> e1, MapEntry<String, double> e2) {
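The hunk ends before the body of compare. A minimal sketch (not part of the commit) of a comparator that makes the PriorityQueue above yield the highest-probability entries first — consistent with the removed getTopProbability returning pq.first as the top prediction — plus a bounds-checked variant of getTopProbabilities; the sample labels and probabilities are illustrative only:

import 'package:collection/collection.dart';

// Orders entries so larger probabilities come out of the queue first.
int compare(MapEntry<String, double> e1, MapEntry<String, double> e2) =>
    e2.value.compareTo(e1.value);

List<MapEntry<String, double>> getTopProbabilities(
    Map<String, double> labeledProb,
    {int number = 3}) {
  var pq = PriorityQueue<MapEntry<String, double>>(compare);
  pq.addAll(labeledProb.entries);
  // Guard against requesting more results than there are labels.
  return [
    for (var i = 0; i < number && pq.isNotEmpty; i += 1) pq.removeFirst()
  ];
}

void main() {
  final probs = {'pikachu': 0.81, 'raichu': 0.12, 'eevee': 0.05};
  // Prints: pikachu: 0.81, raichu: 0.12
  print(getTopProbabilities(probs, number: 2)
      .map((e) => '${e.key}: ${e.value}')
      .join(', '));
}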
+18
@@ -0,0 +1,18 @@
class Stats {
int totalTime;
int preProcessingTime;
int inferenceTime;
int postProcessingTime;
Stats(
{this.totalTime = -1,
this.preProcessingTime = -1,
this.inferenceTime = -1,
this.postProcessingTime = -1});
@override
String toString() {
return 'Stats{totalTime: $totalTime, preProcessingTime: $preProcessingTime, inferenceTime: $inferenceTime, postProcessingTime: $postProcessingTime}';
}
}
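To make the timing fields concrete: a small sketch (illustrative only, assuming the Stats class above is in scope) of the pattern predict() uses, where each phase records a millisecondsSinceEpoch timestamp and the deltas fill the named fields:

void main() async {
  final preProcStart = DateTime.now().millisecondsSinceEpoch;
  await Future.delayed(const Duration(milliseconds: 5)); // stand-in for pre-processing
  final inferenceStart = DateTime.now().millisecondsSinceEpoch;
  await Future.delayed(const Duration(milliseconds: 20)); // stand-in for interpreter.run
  final postProcStart = DateTime.now().millisecondsSinceEpoch;
  await Future.delayed(const Duration(milliseconds: 2)); // stand-in for label mapping / top-k
  final endTime = DateTime.now().millisecondsSinceEpoch;

  print(Stats(
    totalTime: endTime - preProcStart,
    preProcessingTime: inferenceStart - preProcStart,
    inferenceTime: postProcStart - inferenceStart,
    postProcessingTime: endTime - postProcStart,
  ));
}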
+62
@@ -0,0 +1,62 @@
import 'dart:isolate';
import 'package:camera/camera.dart';
import 'package:tensordex_mobile/tflite/classifier.dart';
import 'package:tflite_flutter/tflite_flutter.dart';
import '../utils/image_utils.dart';
import '../utils/logger.dart';
class IsolateBase {
final ReceivePort _receivePort = ReceivePort();
}
class MLIsolate extends IsolateBase {
static const String debugIsolate = "MLIsolate";
late SendPort _sendPort;
SendPort get sendPort => _sendPort;
Future<void> start() async {
await Isolate.spawn<SendPort>(
entryPoint,
_receivePort.sendPort,
debugName: debugIsolate,
);
_sendPort = await _receivePort.first;
}
static void entryPoint(SendPort sendPort) async {
final port = ReceivePort();
sendPort.send(port.sendPort);
await for (final MLIsolateData mlIsolateData in port) {
var cameraImage = mlIsolateData.cameraImage;
var converted = ImageUtils.convertCameraImage(cameraImage);
if (converted != null) {
Classifier classifier = Classifier(
interpreter:
Interpreter.fromAddress(mlIsolateData.interpreterAddress),
labels: mlIsolateData.labels);
var result = classifier.predict(converted);
mlIsolateData.responsePort?.send(result);
} else {
mlIsolateData.responsePort?.send({"response": "not working yet"});
}
}
}
}
/// Bundles data to pass between isolates
class MLIsolateData {
CameraImage cameraImage;
int interpreterAddress;
List<String> labels;
SendPort? responsePort;
MLIsolateData(
this.cameraImage,
this.interpreterAddress,
this.labels,
);
}
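A minimal sketch of the request/response round trip this file implements (assumes an already-started MLIsolate, an initialized Classifier, and a CameraImage frame from the camera plugin; the helper name runInference is hypothetical):

import 'dart:isolate';
import 'package:camera/camera.dart';
import 'package:tensordex_mobile/tflite/classifier.dart';

Future<Map<String, dynamic>?> runInference(
    MLIsolate mlIsolate, CameraImage frame, Classifier classifier) async {
  // One-shot port for this frame's reply.
  final responsePort = ReceivePort();
  mlIsolate.sendPort.send(MLIsolateData(
    frame,
    classifier.interpreter.address, // interpreter is shared by address
    classifier.labels,
  )..responsePort = responsePort.sendPort);
  // The entry point sends exactly one message back per request.
  return await responsePort.first as Map<String, dynamic>?;
}

Call await mlIsolate.start() once before sending frames; this send/await-first pairing is the same pattern the inference helper in the view code below uses.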
+65 -129
@@ -2,16 +2,16 @@ import 'dart:isolate';
import 'package:camera/camera.dart';
import 'package:flutter/material.dart';
import 'package:tensordex_mobile/tflite/classifier.dart';
import 'package:tensordex_mobile/tflite/ml_isolate.dart';
import 'package:tflite_flutter/tflite_flutter.dart';
import 'package:tensordex_mobile/utils/image_utils.dart';
import '../tflite/classifier.dart';
import '../utils/logger.dart';
import '../utils/recognition.dart';
import '../utils/stats.dart';
import '../tflite/data/recognition.dart';
import '../tflite/data/stats.dart';
/// [CameraView] sends each frame for inference
class CameraView extends StatefulWidget {
/// [PokedexView] sends each frame for inference
class PokedexView extends StatefulWidget {
/// Callback to pass results after inference to [HomeView]
final Function(List<Recognition> recognitions) resultsCallback;
@@ -19,32 +19,26 @@ class CameraView extends StatefulWidget {
final Function(Stats stats) statsCallback;
/// Constructor
const CameraView(
const PokedexView(
{Key? key, required this.resultsCallback, required this.statsCallback})
: super(key: key);
@override
State<CameraView> createState() => _CameraViewState();
State<PokedexView> createState() => _PokedexViewState();
}
class _CameraViewState extends State<CameraView> with WidgetsBindingObserver {
/// List of available cameras
class _PokedexViewState extends State<PokedexView> with WidgetsBindingObserver {
late List<CameraDescription> cameras;
/// Controller
late CameraController cameraController;
Interpreter? interp;
late MLIsolate _mlIsolate;
/// true when inference is ongoing
bool predicting = false;
bool _cameraInitialized = false;
bool _classifierInitialized = false;
late Classifier classy;
// /// Instance of [Classifier]
// Classifier classifier;
//
// /// Instance of [IsolateUtils]
// IsolateUtils isolateUtils;
late Interpreter interpreter;
late Classifier classifier;
@override
void initState() {
@@ -54,40 +48,21 @@ class _CameraViewState extends State<CameraView> with WidgetsBindingObserver {
void initStateAsync() async {
WidgetsBinding.instance.addObserver(this);
// Spawn a new isolate
// isolateUtils = IsolateUtils();
// await isolateUtils.start();
// Camera initialization
_mlIsolate = MLIsolate();
await _mlIsolate.start();
initializeCamera();
// final gpuDelegateV2 = GpuDelegateV2(
// options: GpuDelegateOptionsV2(
// isPrecisionLossAllowed: false,
// inferencePreference: TfLiteGpuInferenceUsage.fastSingleAnswer,
// inferencePriority1: TfLiteGpuInferencePriority.minLatency,
// inferencePriority2: TfLiteGpuInferencePriority.auto,
// inferencePriority3: TfLiteGpuInferencePriority.auto,
// ));
logger.e("CREATING THE INTERPRETOR");
var interpreterOptions = InterpreterOptions();//..addDelegate(gpuDelegateV2);
interp = await Interpreter.fromAsset('efficientnet_v2s.tflite',
options: interpreterOptions);
logger.e("CREATING THE INTERPRETOR");
classy = Classifier(interpreter: interp);
logger.i(interp?.getOutputTensors());
// Create an instance of classifier to load model and labels
// classifier = Classifier();
// Initially predicting = false
initializeModel();
predicting = false;
}
void initializeModel() async {
var interpreterOptions = InterpreterOptions()..threads = 8;
interpreter = await Interpreter.fromAsset('efficientnet_v2s.tflite',
options: interpreterOptions);
classifier = Classifier(interpreter: interpreter);
_classifierInitialized = true;
}
/// Initializes the camera by setting [cameraController]
void initializeCamera() async {
cameras = await availableCameras();
@@ -97,101 +72,62 @@ class _CameraViewState extends State<CameraView> with WidgetsBindingObserver {
CameraController(cameras[0], ResolutionPreset.low, enableAudio: false);
cameraController.initialize().then((_) async {
/// previewSize is size of each image frame captured by controller
/// 352x288 on iOS, 240p (320x240) on Android with ResolutionPreset.low
// Stream of image passed to [onLatestImageAvailable] callback
await cameraController.startImageStream(onLatestImageAvailable);
/// previewSize is size of each image frame captured by controller
///
/// 352x288 on iOS, 240p (320x240) on Android with ResolutionPreset.low
// Size previewSize = cameraController.value.previewSize;
//
// /// previewSize is size of raw input image to the model
// CameraViewSingleton.inputImageSize = previewSize;
//
// // the display width of image on screen is
// // same as screenWidth while maintaining the aspectRatio
// Size screenSize = MediaQuery.of(context).size;
// CameraViewSingleton.screenSize = screenSize;
// CameraViewSingleton.ratio = screenSize.width / previewSize.height;
setState(() {
_cameraInitialized = true;
});
});
}
/// Callback to receive each frame [CameraImage] and perform inference on it
onLatestImageAvailable(CameraImage cameraImage) async {
if (_classifierInitialized) {
if (predicting) {
return;
}
setState(() {
predicting = true;
});
var results = await inference(MLIsolateData(
cameraImage, classifier.interpreter.address, classifier.labels));
if (results.containsKey("recognitions")) {
widget.resultsCallback(results["recognitions"]);
}
if (results.containsKey("stats")) {
widget.statsCallback(results["stats"]);
}
logger.i(results);
setState(() {
predicting = false;
});
}
}
@override
Widget build(BuildContext context) {
// Return empty container while the camera is not initialized
if (!cameraController.value.isInitialized) {
if (!_cameraInitialized) {
return Container();
}
return AspectRatio(
aspectRatio: 1/cameraController.value.aspectRatio,
aspectRatio: 1 / cameraController.value.aspectRatio,
child: CameraPreview(cameraController));
}
/// Callback to receive each frame [CameraImage] perform inference on it
onLatestImageAvailable(CameraImage cameraImage) async {
// if (classifier.interpreter != null && classifier.labels != null) {
// // If previous inference has not completed then return
if (predicting) {
return;
}
setState(() {
predicting = true;
});
logger.i("RECIEVED IMAGE");
logger.i(cameraImage.format.group);
logger.i(cameraImage);
var converted = ImageUtils.convertCameraImage(cameraImage);
if (converted != null){
var result = classy.predict(converted);
logger.e("PREDICTED IMAGE");
logger.i(result);
}
// logger.i(cameraImage);
// logger.i(cameraImage.height);
// logger.i(cameraImage.width);
// logger.i(cameraImage.planes[0]);
//
// var uiThreadTimeStart = DateTime.now().millisecondsSinceEpoch;
//
// // Data to be passed to inference isolate
// var isolateData = IsolateData(
// cameraImage, classifier.interpreter.address, classifier.labels);
//
// // We could have simply used the compute method as well however
// // it would be as in-efficient as we need to continuously passing data
// // to another isolate.
//
// /// perform inference in separate isolate
// Map<String, dynamic> inferenceResults = await inference(isolateData);
//
// var uiThreadInferenceElapsedTime =
// DateTime.now().millisecondsSinceEpoch - uiThreadTimeStart;
//
// // pass results to HomeView
// widget.resultsCallback(inferenceResults["recognitions"]);
//
// // pass stats to HomeView
// widget.statsCallback((inferenceResults["stats"] as Stats)
// ..totalElapsedTime = uiThreadInferenceElapsedTime);
// set predicting to false to allow new frames
setState(() {
predicting = false;
});
/// Runs inference in another isolate
Future<Map<String, dynamic>> inference(MLIsolateData mlIsolateData) async {
ReceivePort responsePort = ReceivePort();
_mlIsolate.sendPort
.send(mlIsolateData..responsePort = responsePort.sendPort);
var results = await responsePort.first;
return results;
}
// /// Runs inference in another isolate
// Future<Map<String, dynamic>> inference(IsolateData isolateData) async {
// ReceivePort responsePort = ReceivePort();
// isolateUtils.sendPort
// .send(isolateData..responsePort = responsePort.sendPort);
// var results = await responsePort.first;
// return results;
// }
@override
void didChangeAppLifecycleState(AppLifecycleState state) async {
switch (state) {
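The predicting flag above implements a simple frame gate: while one frame is being classified, later frames are dropped so the camera stream never backs up behind the interpreter. A distilled, standalone sketch of that gate (with a try/finally added so an error cannot leave the gate stuck closed):

// Standalone sketch of the frame-dropping gate used in onLatestImageAvailable.
class FrameGate {
  bool _busy = false;

  Future<void> handle(Future<void> Function() work) async {
    if (_busy) return; // a frame is already in flight; drop this one
    _busy = true;
    try {
      await work();
    } finally {
      _busy = false; // reopen the gate for the next frame
    }
  }
}

Future<void> main() async {
  final gate = FrameGate();
  gate.handle(() => Future.delayed(const Duration(milliseconds: 50)));
  await gate.handle(() async => print('frame 2')); // dropped: gate is busy
  await Future.delayed(const Duration(milliseconds: 60));
  await gate.handle(() async => print('frame 3')); // runs: gate is free again
}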
+7 -12
@@ -1,26 +1,21 @@
import 'package:flutter/material.dart';
import 'package:tensordex_mobile/ui/poke_view.dart';
import 'package:tensordex_mobile/utils/recognition.dart';
import 'package:tensordex_mobile/tflite/data/recognition.dart';
import 'package:tensordex_mobile/tflite/data/stats.dart';
import '../utils/logger.dart';
/// [CameraView] sends each frame for inference
/// [PokedexView] sends each frame for inference
class ResultsView extends StatefulWidget {
final List<Recognition> recognitions;
final Stats stats;
/// Constructor
const ResultsView({Key? key}) : super(key: key);
void setResults(Recognition results){
logger.i("RESULTS IN THE RESULT VIEW");
}
const ResultsView(this.recognitions, this.stats, {Key? key}) : super(key: key);
@override
State<ResultsView> createState() => _ResultsViewState();
}
class _ResultsViewState extends State<ResultsView> {
@override
void initState() {
super.initState();
@@ -28,6 +23,6 @@ class _ResultsViewState extends State<ResultsView> {
@override
Widget build(BuildContext context) {
return Text("data");
return Text(widget.recognitions.toString());
}
}
+14 -100
@@ -3,16 +3,12 @@ import 'package:tensordex_mobile/ui/poke_view.dart';
import 'package:tensordex_mobile/ui/results_view.dart';
import '../utils/logger.dart';
import '../utils/recognition.dart';
import '../utils/stats.dart';
import '../tflite/data/recognition.dart';
import '../tflite/data/stats.dart';
class TensordexHome extends StatefulWidget {
const TensordexHome({Key? key, required this.title}) : super(key: key);
// This widget is the home page of your application. It is stateful, meaning
// that it has a State object (defined below) that contains fields that affect
// how it looks.
// This class is the configuration for the state. It holds the values (in this
// case the title) provided by the parent (in this case the App widget) and
// used by the build method of the State. Fields in a Widget subclass are
@@ -25,12 +21,9 @@ class TensordexHome extends StatefulWidget {
}
class _TensordexHomeState extends State<TensordexHome> {
/// Results to draw bounding boxes
List<Recognition>? results;
/// Realtime stats
Stats? stats;
/// Results from the image classifier
List<Recognition> results = [Recognition(1, "NOTHING DETECTED", .5)];
Stats stats = Stats();
/// Scaffold Key
GlobalKey<ScaffoldState> scaffoldKey = GlobalKey();
@@ -38,106 +31,27 @@ class _TensordexHomeState extends State<TensordexHome> {
void _incrementCounter() {
setState(() {
logger.d("Counter Incremented!");
logger.w("Counter Incremented!");
logger.e("Counter Incremented!");
});
}
// void onNewCameraSelected(CameraDescription cameraDescription) async {
// final previousCameraController = controller;
// // Instantiating the camera controller
// final CameraController cameraController = CameraController(
// cameraDescription,
// ResolutionPreset.high,
// imageFormatGroup: ImageFormatGroup.jpeg,
// );
//
// // Dispose the previous controller
// await previousCameraController.dispose();
//
// // Replace with the new controller
// if (mounted) {
// setState(() {
// controller = cameraController;
// });
// }
//
// // Update UI if controller updated
// cameraController.addListener(() {
// if (mounted) setState(() {});
// });
//
// // Initialize controller
// try {
// await cameraController.initialize();
// } on CameraException catch (e) {
// logger.e('Error initializing camera:', e);
// }
//
// // Update the Boolean
// if (mounted) {
// setState(() {
// _isCameraInitialized = controller.value.isInitialized;
// });
// }
// }
// @override
// void initState() {
// super.initState();
// WidgetsBinding.instance.addObserver(this);
// controller = CameraController(_cameras[0], ResolutionPreset.max);
// controller.initialize().then((_) {
// if (!mounted) {
// return;
// }
//
// setState(() {onNewCameraSelected(_cameras[0]);});
// }).catchError((Object e) {
// if (e is CameraException) {
// switch (e.code) {
// case 'CameraAccessDenied':
// logger.w('User denied camera access.');
// controller.initialize().then((_) {
// if (!mounted) {
// return;
// }
// setState(() {});
// }).catchError((Object e) {
// if (e is CameraException) {
// switch (e.code) {
// case 'CameraAccessDenied':
// logger.i('User denied camera access.');
// break;
// default:
// logger.i('Handle other errors.');
// break;
// }
// }
// });
// break;
// default:
// logger.i('Handle other errors.');
// break;
// }
// }
// });
// }
@override
void initState() {
super.initState();
}
@override
void dispose() {
super.dispose();
}
/// Callback to get inference results from [CameraView]
/// Callback to get inference results from [PokedexView]
void resultsCallback(List<Recognition> results) {
setState(() {
this.results = results;
});
}
/// Callback to get inference stats from [CameraView]
/// Callback to get inference stats from [PokedexView]
void statsCallback(Stats stats) {
setState(() {
this.stats = stats;
@@ -152,12 +66,12 @@ class _TensordexHomeState extends State<TensordexHome> {
),
body: Center(
child: Column(
mainAxisAlignment: MainAxisAlignment.center,
mainAxisAlignment: MainAxisAlignment.start,
children: <Widget>[
CameraView(
PokedexView(
resultsCallback: resultsCallback,
statsCallback: statsCallback),
const ResultsView(),
ResultsView(results, stats),
],
),
),
-10
@@ -1,10 +0,0 @@
import 'dart:ui';
class CameraViewSingleton {
static double ratio = 0.0;
static Size screenSize = const Size(0, 0);
static Size inputImageSize = const Size(0, 0);
static Size get actualPreviewSize =>
Size(screenSize.width, screenSize.width * ratio);
}
-23
@@ -1,23 +0,0 @@
/// Bundles different elapsed times
class Stats {
/// Total time taken in the isolate where the inference runs
int totalPredictTime;
/// [totalPredictTime] + communication overhead time
/// between main isolate and another isolate
int totalElapsedTime;
/// Time for which inference runs
int inferenceTime;
/// Time taken to pre-process the image
int preProcessingTime;
Stats(this.totalPredictTime, this.totalElapsedTime, this.inferenceTime,
this.preProcessingTime);
@override
String toString() {
return 'Stats{totalPredictTime: $totalPredictTime, totalElapsedTime: $totalElapsedTime, inferenceTime: $inferenceTime, preProcessingTime: $preProcessingTime}';
}
}
+81
@@ -0,0 +1,81 @@
// void onNewCameraSelected(CameraDescription cameraDescription) async {
// final previousCameraController = controller;
// // Instantiating the camera controller
// final CameraController cameraController = CameraController(
// cameraDescription,
// ResolutionPreset.high,
// imageFormatGroup: ImageFormatGroup.jpeg,
// );
//
// // Dispose the previous controller
// await previousCameraController.dispose();
//
// // Replace with the new controller
// if (mounted) {
// setState(() {
// controller = cameraController;
// });
// }
//
// // Update UI if controller updated
// cameraController.addListener(() {
// if (mounted) setState(() {});
// });
//
// // Initialize controller
// try {
// await cameraController.initialize();
// } on CameraException catch (e) {
// logger.e('Error initializing camera:', e);
// }
//
// // Update the Boolean
// if (mounted) {
// setState(() {
// _isCameraInitialized = controller.value.isInitialized;
// });
// }
// }
// WidgetsBinding.instance.addObserver(this);
// controller = CameraController(_cameras[0], ResolutionPreset.max);
// controller.initialize().then((_) {
// if (!mounted) {
// return;
// }
//
// setState(() {onNewCameraSelected(_cameras[0]);});
// }).catchError((Object e) {
// if (e is CameraException) {
// switch (e.code) {
// case 'CameraAccessDenied':
// logger.w('User denied camera access.');
// controller.initialize().then((_) {
// if (!mounted) {
// return;
// }
// setState(() {});
// }).catchError((Object e) {
// if (e is CameraException) {
// switch (e.code) {
// case 'CameraAccessDenied':
// logger.i('User denied camera access.');
// break;
// default:
// logger.i('Handle other errors.');
// break;
// }
// }
// });
// break;
// default:
// logger.i('Handle other errors.');
// break;
// }
// }
// });
// }