[英]PyOpenCL - Different results on Intel and NVidia
我正在尝试运行基于高斯脉冲传播的仿真。 我正在使用i5 4590和GTX 970(最新驱动程序)的Windows台式机与2015年初的Macbook air进行交叉开发。
当运行我的主代码时,我在桌面上无法获得任何不错的结果(计算结果有所不同),但是在我的mac上,结果似乎还可以。
为了进一步研究该问题,我尝试运行简单的高斯传播。 Macbook上的结果或多或少是可以的,而在台式机上则完全是一团糟。
我在两台计算机上运行相同的代码,并且都具有相同的python(2.7.10)和相应模块的分发。
这是我的代码
# -- Question's original script (kept verbatim; Python 2 + PyOpenCL). --
# Propagates a Gaussian pulse by repeatedly applying a finite-difference
# step on the device.  The kernel below updates A[] IN PLACE while every
# work-item also reads its neighbours A[gid_x-1] / A[gid_x+1], so the
# result depends on work-item scheduling — this data race is the bug the
# question is about (it shows up on the NVidia GPU but happened to look
# fine on the Mac).
import scipy as sp
import pyopencl as cl
import matplotlib.pyplot as plot
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
MF = cl.mem_flags
# Spatial grid: 0..100 with step dx (N points).
dx = 0.01
X = sp.arange(0.0, 100, dx)
N = len(X)
# Gaussian envelope centred at x=50, modulated by a plane wave; complex64
# maps onto the kernel's float2 (.x = real, .y = imaginary).
A_h = (sp.exp(-(X-50)**2/10.)*sp.exp(-1j*1000*X)).astype(sp.complex64)
A_d = cl.Buffer(ctx, MF.READ_WRITE | MF.COPY_HOST_PTR, hostbuf=A_h)
plot.figure()
# Plot the normalised initial intensity |A|^2.
plot.plot(X, abs(A_h) ** 2 / (abs(A_h) ** 2).max())
# NOTE(review): besides the in-place race, at gid_x == 0 and gid_x == N-1
# the kernel reads A[-1] / A[N], i.e. out of the buffer's bounds.
Source = """
#define complex_ctr(x, y) (float2)(x, y)
#define complex_mul(a, b) complex_ctr(mad(-(a).y, (b).y, (a).x * (b).x), mad((a).y, (b).x, (a).x * (b).y))
#define complex_unit (float2)(0, 1)
__kernel void propagate(__global float2 *A){
const int gid_x = get_global_id(0);
float EPS = 0.1f;
A[gid_x] = A[gid_x] + EPS*complex_mul((A[gid_x+1] + A[gid_x-1]), complex_unit);
}
"""
prg = cl.Program(ctx, Source).build()
# 3000 propagation steps; event.wait() serialises each kernel launch.
for i in range(3000):
print i
event = prg.propagate(queue, (N,), None, A_d)
event.wait()
# Copy the final field back to the host and plot it against the initial one.
cl.enqueue_copy(queue, A_h, A_d)
plot.plot(X, abs(A_h) ** 2 / (abs(A_h) ** 2).max())
plot.show()
这是结果
Mac结果:
绿线表示传播后的高斯,蓝线表示初始的高斯
在 NVidia 显卡上可能导致此问题的原因是什么?我认为我遗漏了某个防止这种情况发生的关键步骤,而它在 Mac 上能得到正确结果只是运气好。
编辑
这是我根据用户建议运行的最终代码
# -- Asker's "final" script after applying the suggestions. --
# Differences from the original: finer grid (dx = 0.001), float32 grid,
# conjugated carrier phase, and the kernel caches both neighbours in
# private variables before a barrier, then updates A[gid_x] in place.
# NOTE(review): barrier(CLK_GLOBAL_MEM_FENCE) only synchronises work-items
# WITHIN a single work-group — it does not make the in-place update safe
# across work-group boundaries; the two-buffer scheme from the answer is
# the robust fix.
import scipy as sp
import pyopencl as cl
import matplotlib.pyplot as plot
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
MF = cl.mem_flags
dx = sp.float32(0.001)
X = sp.arange(0.0, 100, dx).astype(sp.float32)
N = len(X)
# Gaussian envelope times plane wave, as complex64 (float2 on the device).
A_h = (sp.exp(-(X-50)**2/10.)*sp.exp(1j*1000*X)).astype(sp.complex64)
A_d = cl.Buffer(ctx, MF.READ_WRITE | MF.COPY_HOST_PTR, hostbuf=A_h)
# NOTE(review): B_d is allocated here but never passed to the kernel below —
# presumably a leftover from the two-buffer version; verify it is intended.
B_d = cl.Buffer(ctx, MF.READ_WRITE | MF.COPY_HOST_PTR, hostbuf=A_h)
plot.figure()
plot.plot(X, abs(A_h) ** 2 / (abs(A_h) ** 2).max())
Source = """
#define complex_ctr(x, y) (float2)(x, y)
#define complex_mul(a, b) complex_ctr((a).x * (b).x - (a).y * (b).y, (a).x * (b).y + (a).y * (b).x)
#define complex_unit (float2)(0, 1)
__kernel void propagate(__global float2 *A){
const int gid_x = get_global_id(0);
float EPS = 0.1f;
float2 a1, a2;
a1 = A[gid_x-1];
a2 = A[gid_x+1];
barrier(CLK_GLOBAL_MEM_FENCE);
A[gid_x] += EPS*complex_mul((a1 + a2), complex_unit);
}
"""
prg = cl.Program(ctx, Source).build()
# 12000 steps on the finer grid; evolve.wait() blocks after each launch.
for i in range(12000):
print i
evolve = prg.propagate(queue, (N,), None, A_d)
evolve.wait()
cl.enqueue_copy(queue, A_h, A_d)
# Final intensity plotted unnormalised this time.
plot.plot(X, abs(A_h) ** 2)
plot.show()
编辑:哦,刚刚阅读@talonmies评论,这是与此相同的解决方案。
此代码在OpenCL中不安全,它具有datarace问题:
A[gid_x] = A[gid_x] + EPS*complex_mul((A[gid_x+1] + A[gid_x-1]), complex_unit);
每个工作项 x 都会读取 x+1 和 x-1 处的元素,而这些元素同时又被相邻的工作项写入。因此,结果将取决于工作项的调度顺序。
改用2个缓冲区,从A读取,写入B,很容易:
import scipy as sp
import pyopencl as cl
import matplotlib.pyplot as plot
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
MF = cl.mem_flags
dx = 0.01
X = sp.arange(0.0, 100, dx)
N = len(X)
A_h = (sp.exp(-(X-50)**2/10.)*sp.exp(-1j*1000*X)).astype(sp.complex64)
A_d = cl.Buffer(ctx, MF.READ_WRITE | MF.COPY_HOST_PTR, hostbuf=A_h)
B_d = cl.Buffer(ctx, MF.READ_WRITE)
plot.figure()
plot.plot(X, abs(A_h) ** 2 / (abs(A_h) ** 2).max())
Source = """
#define complex_ctr(x, y) (float2)(x, y)
#define complex_mul(a, b) complex_ctr(mad(-(a).y, (b).y, (a).x * (b).x), mad((a).y, (b).x, (a).x * (b).y))
#define complex_unit (float2)(0, 1)
__kernel void propagate(__global float2 *A, __global float2 *B){
const int gid_x = get_global_id(0);
float EPS = 0.1f;
B[gid_x] = A[gid_x] + EPS*complex_mul((A[gid_x+1] + A[gid_x-1]), complex_unit);
}
"""
prg = cl.Program(ctx, Source).build()
for i in range(3000):
print i
event = prg.propagate(queue, (N,), None, A_d, B_d)
A_d, B_d = B_d, A_d #Swap buffers, so A always has results
#event.wait() #You don't need this, this is slowing terribly the execution, enqueue_copy already waits
cl.enqueue_copy(queue, A_h, A_d)
plot.plot(X, abs(A_h) ** 2 / (abs(A_h) ** 2).max())
plot.show()
声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.