![](/img/trans.png)
[英]function with bool return type in OpenGL ES shader using GPUImage
[英]GPUImage custom OpenGL ES shader resulting in black image
基於此,在另一個OpenGL ES圖像過濾器上工作:
uniform sampler2D texture;
uniform float amount;
uniform vec2 texSize;
varying vec2 texCoord;
void main() {
vec4 color = texture2D(texture, texCoord);
vec4 orig = color;
/* High pass filter */
vec4 highpass = color * 5.0;
float dx = 1.0 / texSize.x;
float dy = 1.0 / texSize.y;
highpass += texture2D(texture, texCoord + vec2(-dx, -dy)) * -0.625;
highpass += texture2D(texture, texCoord + vec2(dx, -dy)) * -0.625;
highpass += texture2D(texture, texCoord + vec2(dx, dy)) * -0.625;
highpass += texture2D(texture, texCoord + vec2(-dx, dy)) * -0.625;
highpass += texture2D(texture, texCoord + vec2(-dx * 2.0, -dy * 2.0)) * -0.625;
highpass += texture2D(texture, texCoord + vec2(dx * 2.0, -dy * 2.0)) * -0.625;
highpass += texture2D(texture, texCoord + vec2(dx * 2.0, dy * 2.0)) * -0.625;
highpass += texture2D(texture, texCoord + vec2(-dx * 2.0, dy * 2.0)) * -0.625;
highpass.a = 1.0;
/* Overlay blend */
vec3 overlay = vec3(1.0);
if (highpass.r <= 0.5) {
overlay.r = 2.0 * color.r * highpass.r;
} else {
overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r);
}
if (highpass.g <= 0.5) {
overlay.g = 2.0 * color.g * highpass.g;
} else {
overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g);
}
if (highpass.b <= 0.5) {
overlay.b = 2.0 * color.b * highpass.b;
} else {
overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b);
}
color.rgb = (overlay * 0.8) + (orig.rgb * 0.2);
/* Desaturated hard light */
vec3 desaturated = vec3(orig.r + orig.g + orig.b / 3.0);
if (desaturated.r <= 0.5) {
color.rgb = 2.0 * color.rgb * desaturated;
} else {
color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated);
}
color = (orig * 0.6) + (color * 0.4);
/* Add back some color */
float average = (color.r + color.g + color.b) / 3.0;
color.rgb += (average - color.rgb) * (1.0 - 1.0 / (1.001 - 0.45));
gl_FragColor = (color * amount) + (orig * (1.0 - amount));
}
根據昨天的問題 ,我知道為每個float和vec分配精度。 這一次,它編譯罰款,但是當我去申請在GPUImage過濾器(例如,通過設定的值clarity
至0.8
),圖像變黑。 我的直覺告訴我這與紋理大小有關,但是在不知道GPUImage如何處理紋理的情況下,我有點卡住了。
這是我在Objective-C中的實現:
。H
#import <GPUImage/GPUImage.h>
@interface GPUImageClarityFilter : GPUImageFilter
{
GLint clarityUniform;
}
// Gives the image a gritty, surreal contrasty effect
// Value 0 to 1
@property (readwrite, nonatomic) GLfloat clarity;
@end
.m
#import "GPUImageClarityFilter.h"
#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
NSString *const kGPUImageClarityFragmentShaderString = SHADER_STRING
(
uniform sampler2D inputImageTexture;
uniform lowp float clarity;
uniform highp vec2 textureSize;
varying highp vec2 textureCoordinate;
void main() {
highp vec4 color = texture2D(inputImageTexture, textureCoordinate);
highp vec4 orig = color;
/* High pass filter */
highp vec4 highpass = color * 5.0;
highp float dx = 1.0 / textureSize.x;
highp float dy = 1.0 / textureSize.y;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, -dy)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, -dy)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, dy)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, dy)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, -dy * 2.0)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, -dy * 2.0)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, dy * 2.0)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, dy * 2.0)) * -0.625;
highpass.a = 1.0;
/* Overlay blend */
highp vec3 overlay = vec3(1.0);
if (highpass.r <= 0.5) {
overlay.r = 2.0 * color.r * highpass.r;
} else {
overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r);
}
if (highpass.g <= 0.5) {
overlay.g = 2.0 * color.g * highpass.g;
} else {
overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g);
}
if (highpass.b <= 0.5) {
overlay.b = 2.0 * color.b * highpass.b;
} else {
overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b);
}
color.rgb = (overlay * 0.8) + (orig.rgb * 0.2);
/* Desaturated hard light */
highp vec3 desaturated = vec3(orig.r + orig.g + orig.b / 3.0);
if (desaturated.r <= 0.5) {
color.rgb = 2.0 * color.rgb * desaturated;
} else {
color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated);
}
color = (orig * 0.6) + (color * 0.4);
/* Add back some color */
highp float average = (color.r + color.g + color.b) / 3.0;
color.rgb += (average - color.rgb) * (1.0 - 1.0 / (1.001 - 0.45));
gl_FragColor = (color * clarity) + (orig * (1.0 - clarity));
}
);
#else
NSString *const kGPUImageClarityFragmentShaderString = SHADER_STRING
(
uniform sampler2D inputImageTexture;
uniform float clarity;
uniform vec2 textureSize;
varying vec2 textureCoordinate;
void main() {
vec4 color = texture2D(inputImageTexture, textureCoordinate);
vec4 orig = color;
/* High pass filter */
vec4 highpass = color * 5.0;
float dx = 1.0 / textureSize.x;
float dy = 1.0 / textureSize.y;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, -dy)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, -dy)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, dy)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, dy)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, -dy * 2.0)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, -dy * 2.0)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, dy * 2.0)) * -0.625;
highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, dy * 2.0)) * -0.625;
highpass.a = 1.0;
/* Overlay blend */
vec3 overlay = vec3(1.0);
if (highpass.r <= 0.5) {
overlay.r = 2.0 * color.r * highpass.r;
} else {
overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r);
}
if (highpass.g <= 0.5) {
overlay.g = 2.0 * color.g * highpass.g;
} else {
overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g);
}
if (highpass.b <= 0.5) {
overlay.b = 2.0 * color.b * highpass.b;
} else {
overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b);
}
color.rgb = (overlay * 0.8) + (orig.rgb * 0.2);
/* Desaturated hard light */
vec3 desaturated = vec3(orig.r + orig.g + orig.b / 3.0);
if (desaturated.r <= 0.5) {
color.rgb = 2.0 * color.rgb * desaturated;
} else {
color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated);
}
color = (orig * 0.6) + (color * 0.4);
/* Add back some color */
float average = (color.r + color.g + color.b) / 3.0;
color.rgb += (average - color.rgb) * (1.0 - 1.0 / (1.001 - 0.45));
gl_FragColor = (color * clarity) + (orig * (1.0 - clarity));
}
);
#endif
@implementation GPUImageClarityFilter
@synthesize clarity = _clarity;
#pragma mark -
#pragma mark Initialization and teardown
- (id)init;
{
if (!(self = [super initWithFragmentShaderFromString:kGPUImageClarityFragmentShaderString]))
{
return nil;
}
clarityUniform = [filterProgram uniformIndex:@"clarity"];
self.clarity = 0.0;
return self;
}
#pragma mark -
#pragma mark Accessors
- (void)setClarity:(GLfloat)clarity;
{
_clarity = clarity;
[self setFloat:_clarity forUniform:clarityUniform program:filterProgram];
}
@end
我想做的另一件事是應用GPUImage的內置低通和高通濾波器,但是我感覺最終將是一個笨拙的解決方案。
這可能是由於textureSize
不是作為GPUImageFilter的一部分提供給您的標准統一。 inputImageTexture
和textureCoordinate
是這些過濾器之一提供的標准制服,看起來您正在提供clarity
制服。
由於未設置textureSize
,因此默認值為0.0。 然后,您的1.0 / textureSize.x
計算將被零除,這會導致iOS片段着色器中出現黑框。
您既可以計算並提供該制服,也可以查看將自定義過濾器基於GPUImage3x3TextureSamplingFilter。 該過濾器基類將1.0 / textureSize.x
的結果作為texelWidth
統一texelWidth
(以及垂直組件的匹配texelHeight
)。 您不必計算這個。 實際上,它還會計算周圍8個像素的紋理坐標,因此您可以刪除上面的四個計算,並將其轉換為非依賴性紋理讀取。 您只需要基於2 * texelWidth
和2 * texelHeight
計算四個紋理讀取即可完成其余四個讀取。
實際上,您可能可以將該操作分為多次進行以節省計算,進行小盒子模糊處理,然后進行疊加混合,然后執行此過濾器的最后一個階段。 這樣可以進一步加快速度。
因此,您可以覆蓋
(void)setupFilterForSize:(CGSize)filterFrameSize
設置寬度和高度因子的方法,例如GPUImageSharpenFilter
。
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.