[英]Camera crashes: Fatal signal 6 (SIGABRT) with JNI code
I am currently developing an application where after the camera detected a body, an image (shirt) will be overlaid. 我目前正在开发一个应用程序,在相机检测到身体后,图像(衬衫)将被覆盖。 However, after detecting the body, the camera crashes and my logcat says
Fatal signal 6 (SIGABRT), code -6 in tid 23908 (Thread-39833)
. 但是,在检测到身体之后,相机崩溃了,我的
Fatal signal 6 (SIGABRT), code -6 in tid 23908 (Thread-39833)
。
Below are the codes I've made so far. 以下是到目前为止我编写的代码。
nerds_thesis_clartips_OpencvClass.h //header file nerds_thesis_clartips_OpencvClass.h //头文件
#include <jni.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
#ifndef _Included_nerds_thesis_clartips_OpencvClass
#define _Included_nerds_thesis_clartips_OpencvClass
#ifdef __cplusplus
extern "C" {
#endif
void detectHuman(Mat& frame);
Mat putShirt(Mat frame, Point center, Size humanSize);
JNIEXPORT void JNICALL Java_nerds_thesis_clartips_OpencvClass_humanDetection
(JNIEnv *, jclass, jlong);
#ifdef __cplusplus
}
#endif
#endif
nerds_thesis_clartips_OpencvClass.cpp //C++ file nerds_thesis_clartips_OpencvClass.cpp // C ++文件
#include "nerds_thesis_clartips_OpencvClass.h"
JNIEXPORT void JNICALL Java_nerds_thesis_clartips_OpencvClass_humanDetection
(JNIEnv *, jclass, jlong addrRgba){
Mat& frame = *(Mat*)addrRgba;
detectHuman(frame);
}
void detectHuman(Mat& frame){
// assign xml file to a variable
String human_cascade_name = "/storage/emulated/0/data/haarcascade_upperbody.xml";
CascadeClassifier human_cascade;
// load xml file
if(!human_cascade.load( human_cascade_name ) ) { printf("--(!)Error loading\n"); return; };
std::vector<Rect> humans;
Mat frame_gray;
//convert input to grayscale
cvtColor( frame, frame_gray, CV_BGR2GRAY );
//increase image contrast
equalizeHist( frame_gray, frame_gray);
//Detect Human
human_cascade.detectMultiScale( frame_gray, humans, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(100, 100) );
// Draw the mask over all rectangles
for (size_t i = 0; i < humans.size(); i++){
Rect r = humans[i];
Mat humanROI = frame_gray( humans[i] ); //image of the upper body
int h_temp = humans[i].height; // storing original height
int x = humans[i].x;
int y = humans[i].y - h_temp*(-0.6); // y is increased by 0.6*h
int w = humans[i].width;
int h = h_temp; // height detected
rectangle(frame, Point(x,y), Point(x + w,y +h),Scalar(255,0,255));
Point center( humans[i].x + humans[i].width*0.5, humans[i].y + humans[i].height*0.5 );
frame = putShirt(frame,center,Size( humans[i].width, humans[i].height));
}
}
Mat putShirt(Mat frame, Point center, Size humanSize){
Mat mask = imread("C:/Users/Requinala/AndroidStudioProjects/CLARTIPS/app/src/main/res/drawable/bluevelvet.png");
Mat mask1,src1;
resize(mask,mask1,humanSize);
// ROI selection
Rect roi(center.x - humanSize.width/2, center.y - humanSize.width/2, humanSize.width, humanSize.width);
frame(roi).copyTo(src1);
// to make the white region transparent
Mat mask2,m,m1;
cvtColor(mask1,mask2,CV_BGR2GRAY);
threshold(mask2,mask2,230,255,CV_THRESH_BINARY_INV);
vector<Mat> maskChannels(3),result_mask(3);
split(mask1, maskChannels);
bitwise_and(maskChannels[0],mask2,result_mask[0]);
bitwise_and(maskChannels[1],mask2,result_mask[1]);
bitwise_and(maskChannels[2],mask2,result_mask[2]);
merge(result_mask,m ); // imshow("m",m);
mask2 = 255 - mask2;
vector<Mat> srcChannels(3);
split(src1, srcChannels);
bitwise_and(srcChannels[0],mask2,result_mask[0]);
bitwise_and(srcChannels[1],mask2,result_mask[1]);
bitwise_and(srcChannels[2],mask2,result_mask[2]);
merge(result_mask,m1 ); // imshow("m1",m1);
addWeighted(m,1,m1,1,0,m1); // imshow("m2",m1);
m1.copyTo(frame(roi));
return frame;
}
OpencvCamera.java //Java class for camera OpencvCamera.java //相机的Java类
public class OpencvCamera extends AppCompatActivity implements CameraBridgeViewBase.CvCameraViewListener2{
Mat mRgba;
@Override
public void onCameraViewStarted(int width, int height) {
mRgba = new Mat(height, width, CvType.CV_8UC4);
}
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
OpencvClass.humanDetection(mRgba.getNativeObjAddr());
return mRgba;
}
}
OpencvClass.java //java class for the jni OpencvClass.java // jni的java类
public class OpencvClass {
public native static void humanDetection(long addrRgba);
}
logcats logcats
02-24 16:45:36.269 13615-23908/? A/libc: Fatal signal 6 (SIGABRT), code -6 in tid 23908 (Thread-39833)
02-24 16:45:36.372 280-280/? I/DEBUG: *** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***
02-24 16:45:36.372 280-280/? I/DEBUG: Build fingerprint: 'CMCC/M631Y/M631Y:5.1.1/LMY47V/M631Y_02.24.00RPD_HK.00:user/release-keys'
02-24 16:45:36.372 280-280/? I/DEBUG: Revision: '0'
02-24 16:45:36.372 280-280/? I/DEBUG: ABI: 'arm'
02-24 16:45:36.373 280-280/? I/DEBUG: pid: 13615, tid: 23908, name: Thread-39833 >>> nerds.thesis.clartips <<<
02-24 16:45:36.373 280-280/? I/DEBUG: signal 6 (SIGABRT), code -6 (SI_TKILL), fault addr --------
02-24 16:45:36.396 280-280/? I/DEBUG: r0 00000000 r1 00005d64 r2 00000006 r3 00000000
02-24 16:45:36.396 280-280/? I/DEBUG: r4 a4674dd8 r5 00000006 r6 00000000 r7 0000010c
02-24 16:45:36.396 280-280/? I/DEBUG: r8 00000047 r9 00000001 sl a4674400 fp 0000203e
02-24 16:45:36.397 280-280/? I/DEBUG: ip 00005d64 sp a46732a0 lr b6dbfc2d pc b6de5f3c cpsr 600f0010
02-24 16:45:36.397 280-280/? I/DEBUG: backtrace:
02-24 16:45:36.397 280-280/? I/DEBUG: #00 pc 00039f3c /system/lib/libc.so (tgkill+12)
02-24 16:45:36.397 280-280/? I/DEBUG: #01 pc 00013c29 /system/lib/libc.so (pthread_kill+52)
02-24 16:45:36.397 280-280/? I/DEBUG: #02 pc 00014847 /system/lib/libc.so (raise+10)
02-24 16:45:36.397 280-280/? I/DEBUG: #03 pc 00010fd5 /system/lib/libc.so (__libc_android_abort+36)
02-24 16:45:36.397 280-280/? I/DEBUG: #04 pc 0000f534 /system/lib/libc.so (abort+4)
02-24 16:45:36.397 280-280/? I/DEBUG: #05 pc 008a2e50 /data/app/nerds.thesis.clartips-2/lib/arm/libopencv_java3.so (_ZN9__gnu_cxx27__verbose_terminate_handlerEv+344)
02-24 16:45:36.398 280-280/? I/DEBUG: #06 pc 0087914c /data/app/nerds.thesis.clartips-2/lib/arm/libopencv_java3.so (_ZN10__cxxabiv111__terminateEPFvvE+4)
02-24 16:45:36.398 280-280/? I/DEBUG: #07 pc 0087918c /data/app/nerds.thesis.clartips-2/lib/arm/libopencv_java3.so (_ZSt9terminatev+16)
02-24 16:45:36.398 280-280/? I/DEBUG: #08 pc 00878b68 /data/app/nerds.thesis.clartips-2/lib/arm/libopencv_java3.so (__cxa_throw+168)
02-24 16:45:36.398 280-280/? I/DEBUG: #09 pc 001c8305 /data/app/nerds.thesis.clartips-2/lib/arm/libopencv_java3.so (_ZN2cv5errorERKNS_9ExceptionE+244)
02-24 16:45:36.398 280-280/? I/DEBUG: #10 pc 001c8445 /data/app/nerds.thesis.clartips-2/lib/arm/libopencv_java3.so (_ZN2cv5errorEiRKNS_6StringEPKcS4_i+96)
02-24 16:45:36.398 280-280/? I/DEBUG: #11 pc 0037660f /data/app/nerds.thesis.clartips-2/lib/arm/libopencv_java3.so (_ZN2cv6resizeERKNS_11_InputArrayERKNS_12_OutputArrayENS_5Size_IiEEddi+406)
02-24 16:45:36.399 280-280/? I/DEBUG: #12 pc 0000fbf5 /data/app/nerds.thesis.clartips-2/lib/arm/libMyLibs.so (putShirt+172)
02-24 16:45:36.399 280-280/? I/DEBUG: #13 pc 0000f7ab /data/app/nerds.thesis.clartips-2/lib/arm/libMyLibs.so (detectHuman+842)
02-24 16:45:36.399 280-280/? I/DEBUG: #14 pc 0015fc6d /data/dalvik-cache/arm/data@app@nerds.thesis.clartips-2@base.apk@classes.dex
I'm a beginner and I've searched about SIGABRT
and it says it has something to do about memory issues. 我是一个初学者,已经搜索了
SIGABRT
,它说它与内存问题有关。 I can't find where I've gone wrong in my codes. 我找不到我的代码出了问题的地方。
This is very crucial. 这是非常关键的。 All helps/ideas will be appreciated because this will serve as my final output in college.
我们将不胜感激所有帮助/想法,因为这将作为我在大学的最终成果。
imread("C:/Users/Requinala/AndroidStudioProjects/CLARTIPS/app/src/main/res/drawable/bluevelvet.png")
tires to read the png from your PC disk. 会从您的PC磁盘读取png 。 No wonder that it fails.
难怪它失败了。 You should
你应该
You can use Java to extract images from the resources (or assets) to local storage, or you can use the Android assets native API to read such images directly from the APK. 您可以使用Java将资源(或资产)中的图像提取到本地存储中,也可以使用Android资产本机API直接从APK中读取此类图像。
Assets seem to suit your cause better, because the drawable resources are meant to be adapted to the screen resolution. 资产似乎更适合您的需求,因为可绘制资源旨在适应屏幕分辨率。
Using native assets API with imread() is possible, but tricky. 可以将本机资产API与imread()一起使用,但很棘手。
声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.