![](/img/trans.png)
[英]Swapping Face After Getting Face Contours In Flutter (Google ML Kit)
[英]Flutter Google ML Kit Face Detection not detecting
我正在尝试检测图像上的人脸。 我用相机拍了一张照片。 然后我试图在这张照片上找到面孔。 但是我的 faces 变量始终是空列表（检测不到任何人脸）。我正在使用这个 package进行检测。 这是我的代码片段:
// ML Kit face detector in `accurate` (slower, more precise) mode with every
// optional per-face output (tracking, contours, classification, landmarks)
// disabled — only bounding boxes are requested.
final faceDetector = GoogleMlKit.vision.faceDetector(FaceDetectorOptions(enableTracking: false, enableContours: false, enableClassification: false, enableLandmarks: false, mode: FaceDetectorMode.accurate ));
@override
void initState() {
  super.initState();
  // Kick off detection as soon as the widget is inserted. The original code
  // called `findFaces()`, which is not defined anywhere — the detection
  // method declared below is `detectFace()`.
  detectFace();
}
/// Detects faces in [widget.capturedPhoto].
///
/// Returns the detected faces. `processImage` never returns null: when no
/// face is found the list is simply empty. The original discarded the
/// result; returning it lets callers actually use the detections
/// (awaiting callers of the old `Future<void>` signature still work).
Future<List<Face>> detectFace() async {
  final InputImage data = InputImage.fromFile(widget.capturedPhoto);
  final List<Face> faces = await faceDetector.processImage(data);
  return faces;
}
capturedPhoto 是包含用相机拍摄的照片的文件。 我的 faces 变量始终是空列表（长度为 0，并非 null——processImage 不会返回 null）。 这里还有一个警告:
[WARNING]The specified colorspace format is not supported. Falling back on Libyuv.
如何解决这个问题?
为此,请检查我的代码。 捕获图像后,我确实将文件传递到此页面。在这里我执行图像检测
import 'dart:async';
import 'dart:io';
import 'dart:math';
import 'package:camera/camera.dart';
import 'package:flutter/material.dart';
import 'package:google_ml_kit/google_ml_kit.dart';
/// Screen that displays a captured photo and runs ML Kit face detection
/// on it, showing the detected face attributes in an overlay.
class DetailScreen extends StatefulWidget {
  const DetailScreen({required this.imagePath});

  /// Filesystem path of the photo to analyze.
  final String imagePath;

  @override
  _DetailScreenState createState() => _DetailScreenState();
}
class _DetailScreenState extends State<DetailScreen> {
  late final String _imagePath;

  /// Pixel dimensions of the captured photo; null until [_getImageSize]
  /// completes. Gates [build] between a progress spinner and the image view.
  /// (The original used `_imageSize` without ever declaring it.)
  Size? _imageSize;

  /// ML Kit face detector with every optional output enabled.
  /// Created once per State and closed in [dispose].
  final faceDetector = GoogleMlKit.vision.faceDetector(FaceDetectorOptions(
      enableTracking: true,
      enableContours: true,
      enableClassification: true,
      enableLandmarks: true,
      mode: FaceDetectorMode.accurate));

  /// Flat list of per-face values (bounding box, head Euler angles,
  /// tracking id, smile probability) rendered in the bottom overlay.
  final List<dynamic> faceValue = [];

  /// Resolves the width/height of [imageFile] by decoding its image stream,
  /// then stores the result in [_imageSize] and rebuilds.
  Future<void> _getImageSize(File imageFile) async {
    final Completer<Size> completer = Completer<Size>();
    final Image image = Image.file(imageFile);
    image.image.resolve(const ImageConfiguration()).addListener(
      ImageStreamListener((ImageInfo info, bool _) {
        completer.complete(Size(
          info.image.width.toDouble(),
          info.image.height.toDouble(),
        ));
      }),
    );
    final Size imageSize = await completer.future;
    // Decoding is async — the widget may have been disposed meanwhile.
    if (!mounted) return;
    setState(() {
      _imageSize = imageSize;
    });
  }

  /// Runs face detection on the photo at [_imagePath] and appends each
  /// face's attributes to [faceValue].
  Future<void> _recognizeImage() async {
    await _getImageSize(File(_imagePath));
    final inputImage = InputImage.fromFilePath(_imagePath);
    final List<Face> faces = await faceDetector.processImage(inputImage);
    for (final Face face in faces) {
      final Rect boundingBox = face.boundingBox;
      final double? rotY =
          face.headEulerAngleY; // Head is rotated to the right rotY degrees
      final double? rotZ =
          face.headEulerAngleZ; // Head is tilted sideways rotZ degrees
      final FaceLandmark? leftEar = face.getLandmark(FaceLandmarkType.leftEar);
      if (leftEar != null) {
        final Offset leftEarPos = leftEar.position;
        print("ppppppppppppppppppppppppppppppppppppppppppp ${leftEarPos}");
      }
      if (face.smilingProbability != null) {
        final double? smileProb = face.smilingProbability;
        print("ppppppppppppppppppppppppppppppppppppppppppp ${smileProb}");
      }
      if (face.trackingId != null) {
        final int? id = face.trackingId;
        print("ppppppppppppppppppppppppppppppppppppppppppp ${id}");
      }
      if (!mounted) return;
      setState(() {
        faceValue.addAll([
          boundingBox,
          rotY,
          rotZ,
          face.trackingId,
          face.smilingProbability,
        ]);
        print(
            "lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll ${faceValue}");
      });
    }
  }

  @override
  void initState() {
    _imagePath = widget.imagePath;
    // NOTE: the original created a second FaceDetector here into a local
    // variable that shadowed the field — it was never used and never closed
    // (a native-resource leak). The field above is the only detector needed.
    _recognizeImage();
    super.initState();
  }

  @override
  void dispose() {
    // Release the native ML Kit detector. The original also called
    // `imageLabeler.close()` and `textDetector.close()`, but neither name
    // is declared in this class.
    faceDetector.close();
    super.dispose();
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      body: _imageSize != null
          ? Stack(
              children: [
                Container(
                  width: double.maxFinite,
                  color: Colors.black,
                  child: AspectRatio(
                    aspectRatio: _imageSize!.aspectRatio,
                    child: Image.file(
                      File(_imagePath),
                    ),
                  ),
                ),
                Align(
                  alignment: Alignment.bottomCenter,
                  child: Container(
                    color: Colors.amber,
                    child: SingleChildScrollView(
                      padding: EdgeInsets.all(20),
                      // `faceValue` is non-nullable, so the original
                      // `faceValue != null` was always true; show the list
                      // only once detections have arrived.
                      child: faceValue.isNotEmpty
                          ? ListView.builder(
                              shrinkWrap: true,
                              physics: BouncingScrollPhysics(),
                              itemCount: faceValue.length,
                              itemBuilder: (context, index) {
                                return Column(
                                  children: [
                                    Text(
                                      '${faceValue[index]}',
                                      style: TextStyle(
                                          fontSize: 20,
                                          fontWeight: FontWeight.w900),
                                    )
                                  ],
                                );
                              })
                          : Container(),
                    ),
                  ),
                ),
              ],
            )
          : Container(
              color: Colors.blue,
              child: Center(
                child: CircularProgressIndicator(),
              ),
            ),
    );
  }
}
输出
[Rect.fromLTRB(247.0, 224.0, 554.0, 532.0), 8.070308685302734, 2.248018503189087, 0, 0.9941101670265198,]
这是由于图像方向问题。 原生 iOS 相机拍摄的照片，即使看起来是纵向的，其像素数据实际上可能是按横向存储的。 所以我通过 EXIF 数据检查了真实方向并修正了图像的方向。 现在效果很好。
声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.