![](/img/trans.png)
[英]Open PNG-File, Edit Transparency (Alpha) and Save on Android with Java
[英]How to join png with alpha / transparency in a frame in realtime
我在 OpenCV android 2.4.11 的例子下工作,它使用相機檢測人臉。 我沒有在發現的臉上畫一個矩形,而是試圖在臉上放一個面具(png 圖像)。 但是為了在臉上顯示圖像,png 圖像帶有黑色背景,其中有透明度。
FdActivity.java
// Called once when the camera preview starts: allocate the working Mats
// and load the overlay mask from app resources.
public void onCameraViewStarted(int width, int height) {
mGray = new Mat();
mRgba = new Mat();
// Load my mask png (decoded as an Android Bitmap from drawable resources).
Bitmap image = BitmapFactory.decodeResource(getResources(), R.drawable.mask_1);
mask = new Mat();
// Convert the Bitmap into an OpenCV Mat.
// NOTE(review): if the Bitmap is ARGB_8888 this yields an RGBA Mat; confirm
// the alpha plane survives, since it is needed for transparent overlaying.
Utils.bitmapToMat(image, mask);
}
// Called for every camera frame: detect faces in the grayscale image and
// draw the mask overlay onto each detected face in the RGBA frame.
// Returns the (possibly modified) RGBA frame to be displayed.
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
mGray = inputFrame.gray();
// Lazily compute the minimum face size (in pixels) from the relative
// size setting the first time a frame arrives.
if (mAbsoluteFaceSize == 0) {
int height = mGray.rows();
if (Math.round(height * mRelativeFaceSize) > 0) {
mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
}
mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
}
MatOfRect faces = new MatOfRect();
// Run whichever detector is selected (Java cascade or native).
if (mDetectorType == JAVA_DETECTOR) {
if (mJavaDetector != null)
mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2,
new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
}
else if (mDetectorType == NATIVE_DETECTOR) {
if (mNativeDetector != null)
mNativeDetector.detect(mGray, faces);
}
else {
Log.e(TAG, "Detection method is not selected!");
}
// Overlay the mask image onto each detected face rectangle.
Rect[] facesArray = faces.toArray();
for (int i = 0; i < facesArray.length; i++) {
overlayImage(mRgba, mask, facesArray[i]);
}
return mRgba;
}
// Copy the overlay image onto the region of the frame covering the
// detected face. Returns the same `background` Mat, modified in place.
//
// Fixes over the original: uses the `foregroundMask` parameter instead of
// silently reading the `this.mask` field, and removes the unused `source`
// Mat that was resized to the full frame size on every call (dead work).
//
// NOTE(review): this is still a plain pixel copy, not alpha blending —
// the overlay's black/transparent background is copied over the face too.
// A real fix must weight each pixel by the overlay's alpha channel.
public Mat overlayImage(Mat background, Mat foregroundMask, Rect faceRect)
{
// Scale the overlay to the size of the detected face.
Mat resized = new Mat();
Imgproc.resize(foregroundMask, resized, faceRect.size());
// Copy it into the sub-matrix (view) of the frame at the face position.
resized.copyTo( background.submat( new Rect((int) faceRect.tl().x, (int) faceRect.tl().y, resized.cols(), resized.rows())) );
resized.release();
return background;
}
注意:由於我沒有設置Android開發環境,我將解釋一般原理並提供Python中的示例實現。 將其移植到 Java 應該相當簡單。 隨意將您的代碼作為單獨的答案發布。
你需要做一些類似於 addWeighted 操作所做的事情,也就是逐像素的加權混合:dst = α · overlay + (1 − α) · background。
但是,在您的情況下, α 需要是一個矩陣(即我們需要每個像素不同的混合系數)。
讓我們使用一些示例圖像來說明這一點。 我們可以使用 Lena 圖像作為樣本人臉:
此圖像作為具有透明度的疊加層:
這張圖片是沒有透明度的疊加層:
為了獲得alpha矩陣,我們可以使用閾值確定前景(覆蓋)和背景(人臉)蒙版,或者使用來自輸入圖像的 alpha 通道(如果可用)。
在值范圍為 0.0 .. 1.0 的浮點圖像上執行此操作很有用。 然后我們可以將兩個掩碼之間的關系表示為
foreground_mask = 1.0 - background_mask
即兩個掩碼逐像素相加的結果處處等於 1.0。
對於 RGBA 格式的疊加圖像,我們得到以下前景和背景蒙版:
當我們在 RGB 格式的情況下使用閾值、侵蝕和模糊時,我們得到以下前景和背景蒙版:
現在我們可以計算兩個加權部分:
foreground_part = overlay_image * foreground_mask
background_part = face_image * background_mask
對於 RGBA 疊加,前景和背景部分如下所示:
對於 RGB 疊加,前景和背景部分如下所示:
最后將它們加在一起,並將圖像轉換回 0-255 范圍內的 8 位整數。
運算結果如下(分別為RGBA和RGB疊加):
import numpy as np
import cv2
# ==============================================================================
def blend_non_transparent(face_img, overlay_img):
    """Blend a 3-channel overlay onto a face image, treating pure black
    overlay pixels as background (i.e. fully transparent)."""
    # The foreground mask covers every non-black pixel of the overlay;
    # thresholding must be done on a grayscale version of the image.
    grey = cv2.cvtColor(overlay_img, cv2.COLOR_BGR2GRAY)
    _, fg_mask = cv2.threshold(grey, 1, 255, cv2.THRESH_BINARY)

    # Shrink and blur the mask slightly so the blend transitions smoothly.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fg_mask = cv2.erode(fg_mask, kernel)
    fg_mask = cv2.blur(fg_mask, (3, 3))

    # The background mask is simply the complement.
    bg_mask = 255 - fg_mask

    # Promote both masks to three channels so they can weight each colour plane.
    fg_mask = cv2.cvtColor(fg_mask, cv2.COLOR_GRAY2BGR)
    bg_mask = cv2.cvtColor(bg_mask, cv2.COLOR_GRAY2BGR)

    # Weight each source by its mask, working in 0.0-1.0 floating point.
    face_part = (face_img * (1 / 255.0)) * (bg_mask * (1 / 255.0))
    overlay_part = (overlay_img * (1 / 255.0)) * (fg_mask * (1 / 255.0))

    # Sum the two weighted parts and rescale back to an 8-bit image.
    return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0))
# ==============================================================================
# We load the images
# NOTE(review): cv2.imread returns None if the file is missing — these
# sample files are assumed to exist next to the script; verify before use.
face_img = cv2.imread("lena.png", -1)
overlay_img = cv2.imread("overlay.png", -1)
# Blend the (non-transparent, black-background) overlay onto the face
# and write the result to disk.
result_1 = blend_non_transparent(face_img, overlay_img)
cv2.imwrite("merged.png", result_1)
import numpy as np
import cv2
# ==============================================================================
def blend_transparent(face_img, overlay_t_img):
    """Blend a 4-channel (BGRA) overlay onto a face image, using the
    overlay's alpha plane as the per-pixel blending weight."""
    # Split colour information from the transparency mask.
    overlay_img = overlay_t_img[:, :, :3]   # the BGR planes
    overlay_mask = overlay_t_img[:, :, 3:]  # the alpha plane

    # The background mask is the complement of the alpha plane.
    background_mask = 255 - overlay_mask

    # Promote both masks to three channels so they can weight each colour plane.
    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    # Weight each source by its mask, working in 0.0-1.0 floating point.
    face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    # Sum the two weighted parts and rescale back to an 8-bit image.
    return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0))
# ==============================================================================
# We load the images
# NOTE(review): cv2.imread returns None if the file is missing — these
# sample files are assumed to exist next to the script; verify before use.
face_img = cv2.imread("lena.png", -1)
overlay_t_img = cv2.imread("overlay_transparent.png", -1) # Load with transparency
# Blend the BGRA overlay onto the face using its alpha channel as the
# per-pixel weight, then write the result to disk.
result_2 = blend_transparent(face_img, overlay_t_img)
cv2.imwrite("merged_transparent.png", result_2)
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.