
Sharpening video images using OpenCV

I would like to sharpen my image using OpenCV. I looked at an example online that performs the sharpening on a grayscale image; I tried it and it works perfectly. However, I am now trying to do the same with RGB colors, so I apply the same function to the three channels separately, but it gives no visible result: the output image is exactly the same as the original image.

#include "Sharpening.h"
using namespace std;

Sharpening::Sharpening() {
}

Sharpening::~Sharpening() {
}

IplImage* Sharpening::laplace(IplImage* channel) {

    CvSize size = cvSize(channel->width, channel->height);

    IplImage* temp = cvCreateImage(size, IPL_DEPTH_8U, 1);

    IplImage* lapl = cvCreateImage(size, IPL_DEPTH_8U, 1);

    int width = size.width;

    int height = size.height;

    // copy the input channel into the 8-bit working image temp
    cvConvertScale(channel, temp, 1.0);

    // 3x3 kernel: -1 everywhere, strong positive centre -> sharpening
    CvMat* ker = cvCreateMat(3, 3, CV_32FC1);

    cvSet(ker, cvScalarAll(-1.0));

    cvSet2D(ker, 1, 1, cvScalarAll(15.0));

    cout << "this has been executed";

    // filter temp with the kernel; the result goes into lapl
    cvFilter2D(temp, lapl, ker);

    cvReleaseMat(&ker);

    double maxv = 0.0;

    double minv = 1.79769e+308; // roughly DBL_MAX; this value does not fit in a float

    cvMinMaxLoc(lapl, &minv, &maxv);


    for (int i = 0; i < width * height; i++) {

        double lap_val = cvGet1D(lapl, i).val[0];

        int v = (int) ((255.0 * lap_val / maxv) + 0.5); // this calculation does not appear to do anything in particular

        cvSet1D(temp, i, cvScalarAll(v));
    }

    maxv = 0.0;

    cvMinMaxLoc(channel, &minv, &maxv);

    for (int i = 0; i < width * height; i++) {

        double val = cvGet1D(channel, i).val[0];
        int v = (int) ((255.0 * val / maxv) + 0.5);

        cvSet1D(channel, i, cvScalarAll(v));
    }

    cvReleaseImage(&temp);
    cvReleaseImage(&lapl);

    return channel;
} // end of function

int Sharpening::calculateLoop(int number) {
    int value = 2;
    for (int i = 0; i < 10; i++) {
        number = number * value;
        cout << number << endl;
    }

    return number;
}

//======================================================================================

int Sharpening::SharpenColored(Sharpening sharp) {

    int key = 0;

    CvCapture *capture = 0;

    IplImage* frame = 0;

    cvNamedWindow("deblur", CV_WINDOW_AUTOSIZE);

    cvNamedWindow("deblur2", CV_WINDOW_AUTOSIZE);

    cvNamedWindow("origional", CV_WINDOW_AUTOSIZE);

    // initialize camera
    capture = cvCaptureFromCAM(0); //capture from a camera

    //capture = cvCaptureFromAVI("jabelH2.avi");

    //frame = cvQueryFrame(capture);

    if (!cvGrabFrame(capture)) { // capture a frame
        printf("Could not grab a frame\n\7");
        exit(0);
    }

    frame = cvQueryFrame(capture);

    CvSize imageSize1 = cvSize(frame->width, frame->height);

    IplImage* R = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);

    IplImage* G = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);

    IplImage* B = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);

    IplImage* R2 = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);

    IplImage* G2 = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);

    IplImage* B2 = cvCreateImage(imageSize1, IPL_DEPTH_8U, 1);

    IplImage* source = cvCreateImage(imageSize1, IPL_DEPTH_8U, 3);

    IplImage* result = cvCreateImage(imageSize1, IPL_DEPTH_8U, 3);

    IplImage* result2 = cvCreateImage(imageSize1, IPL_DEPTH_8U, 3);

    QFuture<IplImage*> future1;

    QFuture<IplImage*> future2;

    QFuture<IplImage*> future3;

    while (key != 'q') {
        // get a frame

        frame = cvQueryFrame(capture);

        // always check
        if (!frame)
            break;

        source = frame;

        cvSplit(frame, B, G, R, NULL);

        future1 = QtConcurrent::run(sharp, &Sharpening::laplace, R);

        future2 = QtConcurrent::run(sharp, &Sharpening::laplace, G);

        future3 = QtConcurrent::run(sharp, &Sharpening::laplace, B);

        R2 = future1.result();

        G2 = future2.result();

        B2 = future3.result();

        cvMerge(B2, G2, R2, NULL, result);

        cvAdd(source, result, result2, NULL);
        cvShowImage("origional", source);
        cvShowImage("deblur", R2);
        cvShowImage("deblur2", G2);

        key = cvWaitKey(1);
    } //end of while

    cvDestroyWindow("deblur");
    cvDestroyWindow("deblur2");
    cvDestroyWindow("origional");
    cvReleaseImage(&R);
    cvReleaseImage(&source);
    cvReleaseImage(&R2);
    cvReleaseImage(&G);
    cvReleaseImage(&G2);
    cvReleaseImage(&B);
    cvReleaseImage(&B2);
    cvReleaseImage(&result);
    cvReleaseImage(&result2);
    cvReleaseCapture(&capture);

    return 0;
} //end of function

//======================================================================================

int main(int argc, char *argv[]) {
    Sharpening sh;
    sh.SharpenColored(sh);
}

I am now trying to do the same but with RGB colors so I am performing the same functionality

Check your assumptions! I don't think you are doing the same thing. I don't know OpenCV very well, but you're not doing anything with your temp image, so there is no reason for the channel image to change! You should write the result held in the temp/lapl image back into the original channel image, something like this perhaps:

for (int i = 0; i < width * height; i++) {

    double lap_val = cvGet1D(lapl, i).val[0]; // get modified image data

    int v = (int) ((255.0 * lap_val / maxv) + 0.5); // scale to 0 255
    cvSet1D(channel, i, cvScalarAll(v)); // store in original image
}

Or you can take the original code, comment it to explain what each IplImage contains, and see what you missed when reusing it in your app.
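For what it's worth, here is a minimal sketch of how the corrected per-channel function could look, keeping the legacy C API used in the question and commenting what each image holds. The function name sharpenChannel, the header choice and the division-by-zero guard are my own additions; the kernel values and the 0-255 rescaling are taken from the question's code.

#include <opencv/cv.h> // legacy OpenCV 1.x C API header (assumed available)

// Sharpen one 8-bit, single-channel plane (R, G or B from cvSplit) in place.
IplImage* sharpenChannel(IplImage* channel) {

    CvSize size = cvSize(channel->width, channel->height);

    // lapl holds the raw output of the sharpening kernel
    IplImage* lapl = cvCreateImage(size, IPL_DEPTH_8U, 1);

    // 3x3 kernel: -1 everywhere, strong positive centre -> sharpening
    CvMat* ker = cvCreateMat(3, 3, CV_32FC1);
    cvSet(ker, cvScalarAll(-1.0));
    cvSet2D(ker, 1, 1, cvScalarAll(15.0));

    cvFilter2D(channel, lapl, ker);
    cvReleaseMat(&ker);

    double minv = 0.0, maxv = 0.0;
    cvMinMaxLoc(lapl, &minv, &maxv);
    if (maxv <= 0.0)
        maxv = 1.0; // avoid dividing by zero on an all-black plane

    // rescale to 0..255 and, crucially, write the filtered values
    // back into the channel that is returned to the caller
    for (int i = 0; i < size.width * size.height; i++) {
        double lap_val = cvGet1D(lapl, i).val[0];
        int v = (int) ((255.0 * lap_val / maxv) + 0.5);
        cvSet1D(channel, i, cvScalarAll(v)); // store the result in the original plane
    }

    cvReleaseImage(&lapl);
    return channel;
}

With the result written back into the channel, the cvMerge(B2, G2, R2, NULL, result) call in SharpenColored() then merges the sharpened planes instead of the untouched ones, and the "deblur" windows should show a visibly sharpened image.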
