
How to convert a byte array RGB image into greyscale

I use com.google.zxing core to scan an image. The RGBLuminanceSource constructor takes an int array of pixels, converts it into a byte array, and "convert[s] the entire image to a greyscale array". I already have a byte array of pixels and would like to do only that conversion.

Can anyone help me with the greyscale conversion?

package com.google.zxing;

/**
 * This class is used to help decode images from files which arrive as RGB data from
 * an ARGB pixel array. It does not support rotation.
 *
 * @author dswitkin@google.com (Daniel Switkin)
 * @author Betaminos
 */
public final class RGBLuminanceSource extends LuminanceSource {

  private final byte[] luminances;
  private final int dataWidth;
  private final int dataHeight;
  private final int left;
  private final int top;

  public RGBLuminanceSource(int width, int height, int[] pixels) {
    super(width, height);

    dataWidth = width;
    dataHeight = height;
    left = 0;
    top = 0;

    // In order to measure pure decoding speed, we convert the entire image to a greyscale array
    // up front, which is the same as the Y channel of the YUVLuminanceSource in the real app.
    //
    // Total number of pixels suffices, can ignore shape
    int size = width * height;
    luminances = new byte[size];
    for (int offset = 0; offset < size; offset++) {
      int pixel = pixels[offset];
      int r = (pixel >> 16) & 0xff; // red
      int g2 = (pixel >> 7) & 0x1fe; // 2 * green
      int b = pixel & 0xff; // blue
      // Calculate green-favouring average cheaply
      luminances[offset] = (byte) ((r + g2 + b) / 4);
    }
  }

  /**
   * My constructor
   */
  public RGBLuminanceSource(byte[] pixels,
                            int dataWidth,
                            int dataHeight) {
    this(pixels, dataWidth, dataHeight, 0, 0, dataWidth, dataHeight);

    // WHAT SHOULD I DO HERE TO "convert the entire image to a greyscale array"?
  }

  private RGBLuminanceSource(byte[] pixels,
                             int dataWidth,
                             int dataHeight,
                             int left,
                             int top,
                             int width,
                             int height) {
    super(width, height);
    if (left + width > dataWidth || top + height > dataHeight) {
      throw new IllegalArgumentException("Crop rectangle does not fit within image data.");
    }
    this.luminances = pixels;
    this.dataWidth = dataWidth;
    this.dataHeight = dataHeight;
    this.left = left;
    this.top = top;
  }

  @Override
  public byte[] getRow(int y, byte[] row) {
    if (y < 0 || y >= getHeight()) {
      throw new IllegalArgumentException("Requested row is outside the image: " + y);
    }
    int width = getWidth();
    if (row == null || row.length < width) {
      row = new byte[width];
    }
    int offset = (y + top) * dataWidth + left;
    System.arraycopy(luminances, offset, row, 0, width);
    return row;
  }

  @Override
  public byte[] getMatrix() {
    int width = getWidth();
    int height = getHeight();

    // If the caller asks for the entire underlying image, save the copy and give them the
    // original data. The docs specifically warn that result.length must be ignored.
    if (width == dataWidth && height == dataHeight) {
      return luminances;
    }

    int area = width * height;
    byte[] matrix = new byte[area];
    int inputOffset = top * dataWidth + left;

    // If the width matches the full width of the underlying data, perform a single copy.
    if (width == dataWidth) {
      System.arraycopy(luminances, inputOffset, matrix, 0, area);
      return matrix;
    }

    // Otherwise copy one cropped row at a time.
    for (int y = 0; y < height; y++) {
      int outputOffset = y * width;
      System.arraycopy(luminances, inputOffset, matrix, outputOffset, width);
      inputOffset += dataWidth;
    }
    return matrix;
  }

  @Override
  public boolean isCropSupported() {
    return true;
  }

  @Override
  public LuminanceSource crop(int left, int top, int width, int height) {
    return new RGBLuminanceSource(luminances,
                                  dataWidth,
                                  dataHeight,
                                  this.left + left,
                                  this.top + top,
                                  width,
                                  height);
  }

}

Assuming your pixels byte array is in RGB byte-interleaved format, you could do:

public RGBLuminanceSource(byte[] pixels, int width, int height) {
    super(width, height);

    dataWidth = width;
    dataHeight = height;
    left = 0;
    top = 0;

    // Total number of pixels suffices, can ignore shape
    int size = width * height;
    luminances = new byte[size];
    for (int offset = 0; offset < size; offset++) {
        int r  =  pixels[offset * 3    ] & 0xff; // red
        int g2 = (pixels[offset * 3 + 1] & 0xff) << 1; // 2 * green
        int b  =  pixels[offset * 3 + 2] & 0xff; // blue

        // Calculate green-favouring average cheaply
        luminances[offset] = (byte) ((r + g2 + b) / 4);
    }
}

The calculation isn't really the standard way of computing luminance (greyscale) from RGB values, but it should produce the same results as the int-packed RGB version.
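For comparison, here is a minimal sketch of the conventional BT.601 weighting (0.299 R + 0.587 G + 0.114 B), assuming the same interleaved RGB byte layout; the LuminanceDemo class and toLuminanceBt601 method names are just for illustration and are not part of ZXing:

public final class LuminanceDemo {

    // Converts an interleaved RGB byte array (R, G, B per pixel) to an
    // 8-bit greyscale array using an integer approximation of the BT.601
    // weights 0.299 R + 0.587 G + 0.114 B.
    static byte[] toLuminanceBt601(byte[] rgb, int width, int height) {
        int size = width * height;
        byte[] luminances = new byte[size];
        for (int offset = 0; offset < size; offset++) {
            int r = rgb[offset * 3]     & 0xff;
            int g = rgb[offset * 3 + 1] & 0xff;
            int b = rgb[offset * 3 + 2] & 0xff;
            luminances[offset] = (byte) ((299 * r + 587 * g + 114 * b) / 1000);
        }
        return luminances;
    }

    public static void main(String[] args) {
        // One white pixel followed by one pure-green pixel.
        byte[] rgb = {
            (byte) 0xff, (byte) 0xff, (byte) 0xff,
            (byte) 0x00, (byte) 0xff, (byte) 0x00
        };
        byte[] grey = toLuminanceBt601(rgb, 2, 1);
        System.out.println((grey[0] & 0xff) + " " + (grey[1] & 0xff)); // prints: 255 149
    }
}

ZXing's own constructor sticks with the cheap (r + 2*g + b) / 4 shift-based average, presumably because barcode decoding only needs enough contrast, not a perceptually accurate greyscale.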
