
Sharpening video images using openCV

I want to sharpen my images using OpenCV. I found an example online that performs sharpening on a grayscale image; I tried it and it works fine. However, I am now trying to do it with RGB color, so I perform the same function on each of the three channels separately, but it gives me no result: the image comes out exactly the same as the original image.

#include "Sharpening.h"
using namespace std;

Sharpening::Sharpening() {
}

Sharpening::~Sharpening() {
}

IplImage* Sharpening::laplace(IplImage* channel) {

    CvSize size = cvSize(channel->width, channel->height);
    IplImage* temp = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage* lapl = cvCreateImage(size, IPL_DEPTH_8U, 1);
    int width = size.width;
    int height = size.height;

    cvConvertScale(channel, temp, 1.0);

    CvMat* ker = cvCreateMat(3, 3, CV_32FC1);
    cvSet(ker, cvScalarAll(-1.0));
    cvSet2D(ker, 1, 1, cvScalarAll(15.0));

    cout << "this is been executed";

    cvFilter2D(temp, lapl, ker);
    cvReleaseMat(&ker);

    double maxv = 0.0;
    float maxFloat = 1.79769e+308;
    double minv = maxFloat;
    cvMinMaxLoc(lapl, &minv, &maxv);

    for (int i = 0; i < width * height; i++) {
        double lap_val = cvGet1D(lapl, i).val[0];
        int v = (int) ((255.0 * lap_val / maxv) + 0.5); // this calculation does nothing particularly
        cvSet1D(temp, i, cvScalarAll(v));
    }

    maxv = 0.0;
    cvMinMaxLoc(channel, &minv, &maxv);

    for (int i = 0; i < width * height; i++) {
        double val = cvGet1D(channel, i).val[0];
        int v = (int) ((255.0 * val / maxv) + 0.5);
        cvSet1D(channel, i, cvScalarAll(v));
    }

    cvReleaseImage(&temp);
    cvReleaseImage(&lapl);
    cvReleaseMat(&ker);

    return channel;
} // end of function

int Sharpening::calculateLoop(int number) {
    int value = 2;
    for (int i = 0; i < 10; i++) {
        number = number * value;
        cout << number << endl;
    }

    return number;
}

//======================================================================================

int Sharpening::SharpenColored(Sharpening sharp) {

    int key = 0;
    CvCapture *capture = 0;
    IplImage* frame = 0;

    cvNamedWindow("deblur", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("deblur2", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("origional", CV_WINDOW_AUTOSIZE);

    // initialize camera
    capture = cvCaptureFromCAM(0); //capture from a camera
    //capture = cvCaptureFromAVI("jabelH2.avi");
    //frame = cvQueryFrame(capture);

    if (!cvGrabFrame(capture)) { // capture a frame
        printf("Could not grab a frame\n\7");
        exit(0);
    }

    frame = cvQueryFrame(capture);

    CvSize imageSize1 = cvSize(frame->width, frame->height);

    IplImage* R = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);
    IplImage* G = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);
    IplImage* B = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);
    IplImage* R2 = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);
    IplImage* G2 = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);
    IplImage* B2 = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);
    IplImage* source = cvCreateImage(imageSize1, IPL_DEPTH_8U, 3);
    IplImage* result = cvCreateImage(imageSize1, IPL_DEPTH_8U, 3);
    IplImage* result2 = cvCreateImage(imageSize1, IPL_DEPTH_8U, 3);

    QFuture<IplImage*> future1;
    QFuture<IplImage*> future2;
    QFuture<IplImage*> future3;

    while (key != 'q') {
        // get a frame
        frame = cvQueryFrame(capture);

        // always check
        if (!frame)
            break;

        source = frame;

        cvSplit(frame, B, G, R, NULL);

        future1 = QtConcurrent::run(sharp, &Sharpening::laplace, R);
        future2 = QtConcurrent::run(sharp, &Sharpening::laplace, G);
        future3 = QtConcurrent::run(sharp, &Sharpening::laplace, B);

        R2 = future1.result();
        G2 = future2.result();
        B2 = future3.result();

        cvMerge(B2, G2, R2, NULL, result);

        cvAdd(source, result, result2, NULL);
        cvShowImage("origional", source);
        cvShowImage("deblur", R2);
        cvShowImage("deblur2", G2);

        key = cvWaitKey(1);
    } //end of while

    cvDestroyWindow("deblur");
    cvDestroyWindow("deblur2");
    cvDestroyWindow("origional");
    cvReleaseImage(&R);
    cvReleaseImage(&source);
    cvReleaseImage(&R2);
    cvReleaseImage(&G);
    cvReleaseImage(&G2);
    cvReleaseImage(&B);
    cvReleaseImage(&B2);
    cvReleaseImage(&result);
    cvReleaseImage(&result2);
    cvReleaseCapture(&capture);
    delete future1;
    delete future2;
    delete future3;

    return 0;
} //end of function

//======================================================================================

int main(int argc, char *argv[]) {
    Sharpening sh;
    sh.SharpenColored(sh);
}

"I am now trying to use RGB color, so I perform the same function"

Check your assumptions! I do not think you are doing that. I do not know openCv very well, but you never do anything with your temp image, so there is no reason for the channel image to change! You should store the result held in the temp image back into the original image, perhaps like this:

for (int i = 0; i < width * height; i++) {

    double lap_val = cvGet1D(lapl, i).val[0]; // get modified image data

    int v = (int) ((255.0 * lap_val / maxv) + 0.5); // scale to 0 255
    cvSet1D(channel, i, cvScalarAll(v)); // store in original image
}

Or you could take your original code and comment it to explain what each cvImage contains, and see what you left out when reusing it in your application.
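
For reference, here is a minimal sketch of what laplace() could look like with that write-back applied. It assumes the same legacy IplImage C API used in the question, keeps the kernel values from the question's code, and drops the temp copy because cvFilter2D can read the channel directly; it is not the only way to fix the function:

IplImage* Sharpening::laplace(IplImage* channel) {

    CvSize size = cvSize(channel->width, channel->height);
    IplImage* lapl = cvCreateImage(size, IPL_DEPTH_8U, 1);

    // sharpening kernel: -1 everywhere, strong positive weight in the centre
    // (a centre of 9.0 would make the kernel sum to 1 and avoid brightening)
    CvMat* ker = cvCreateMat(3, 3, CV_32FC1);
    cvSet(ker, cvScalarAll(-1.0));
    cvSet2D(ker, 1, 1, cvScalarAll(15.0));

    // filter the channel itself into lapl
    cvFilter2D(channel, lapl, ker);
    cvReleaseMat(&ker);

    // find the maximum filtered value for rescaling to 0..255
    double minv = 0.0, maxv = 0.0;
    cvMinMaxLoc(lapl, &minv, &maxv);

    // write the rescaled result back into the original channel image
    for (int i = 0; i < size.width * size.height; i++) {
        double lap_val = cvGet1D(lapl, i).val[0];
        int v = (int) ((255.0 * lap_val / maxv) + 0.5);
        cvSet1D(channel, i, cvScalarAll(v));
    }

    cvReleaseImage(&lapl);
    return channel; // now actually contains the sharpened data
}

With this change the sharpened data actually reaches the R, G and B images that cvMerge combines, so the merged result should differ from the source frame.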
