[英]optimize unaligned SSE2/AVX2 XOR
在我的代碼中,我必須處理 websocket 數據包的「取消屏蔽」(unmasking),這實質上意味着對任意長度的未對齊數據進行異或。感謝 SO(Websocket 數據取消屏蔽/多字節 xor),我已經瞭解到如何(希望如此)使用 SSE2/AVX2 擴展加速這一過程。但現在回頭看,我覺得自己對未對齊數據的處理完全不是最優的(sub-optimal)。有沒有什麼方法可以優化我的代碼,或者至少在保持相同性能的前提下使其更簡單?還是我的代碼已經是最佳表現了?
這是代碼的重要部分(對於我假設數據總是至少足以運行AVX2循環一次的問題,但同時它最多只能運行幾次):
// Circular (rotate) left shift for a uint32.
// Fixed: the return type was `int` (sign-conversion hazard for values with
// the top bit set) and `num >> (32 - shift)` was UB for shift == 0
// (shift count equal to the type width). Masking both counts with 31 makes
// every shift in [0, 31] well defined, and shift == 0 yields num unchanged.
uint32_t cshiftl_u32(uint32_t num, uint8_t shift) {
return (num << (shift & 31u)) | (num >> ((32u - shift) & 31u));
}
// Circular (rotate) right shift for a uint32.
// Fixed: the return type was `int` and `num << (32 - shift)` was UB for
// shift == 0 (shift count equal to the type width). Masking both counts
// with 31 makes every shift in [0, 31] well defined.
uint32_t cshiftr_u32(uint32_t num, uint8_t shift) {
return (num >> (shift & 31u)) | (num << ((32u - shift) & 31u));
}
// XOR the byte range [ds, de) with a cyclic 4-byte mask (WebSocket
// unmasking). Stream byte k (counted from ds) is XORed with
// ((uint8_t *)&mask)[k & 3], i.e. the mask bytes in memory order.
//
// Fixes vs. the original:
//  - mask64 was built as `mask | (mask << 4)`, which shifts by 4 bits (not
//    32) and overflows uint32_t; correct is `(uint64_t)m << 32 | m`.
//  - the de32/de64/de128/de256 end pointers were masked with ~31/~63/~127/
//    ~255 instead of ~3/~7/~15/~31, and the 4/8/16-byte steps advanced ds
//    by `++ds` (1 byte) instead of the block size — corrupting the data.
//  - `cshiftl_u32(mask, maskOffset)` rotated by a byte COUNT used as a bit
//    count, and the rotate direction depends on endianness; replaced by an
//    endian-independent byte-table rotation.
//  - `*(uint32_t *)ds ^= mask` violates strict aliasing; memcpy compiles to
//    the same single load/xor/store at -O2 without the UB.
// The SIMD paths use unaligned loads/stores (loadu/storeu): on modern x86
// these run at full speed, so only 8-byte alignment is established for the
// scalar word loop. Define CPU_SSE2 / CPU_AVX2 (and include <immintrin.h>)
// to enable the vector paths.
void optimized_xor_32( uint32_t mask, uint8_t *ds, uint8_t *de ) {
    if (ds >= de) return; // empty range -> nothing to do

    uint8_t mb[4]; // mask bytes in memory order (endian-independent view)
    memcpy(mb, &mask, sizeof mb);

    // Head: single bytes until ds is 8-byte aligned (or the range ends).
    size_t off = 0; // current offset into the 4-byte mask cycle
    for (; ds < de && ((uintptr_t)ds & 7u); ds++) {
        *ds ^= mb[off];
        off = (off + 1) & 3u;
    }
    if (ds == de) return;

    // Rotate the mask so its byte 0 lines up with the current stream offset.
    uint8_t rb[4];
    for (int i = 0; i < 4; i++) rb[i] = mb[(off + i) & 3u];
    uint32_t mask32;
    memcpy(&mask32, rb, sizeof mask32);
    // Both halves identical, so the byte pattern repeats on either endianness.
    uint64_t mask64 = ((uint64_t)mask32 << 32) | mask32;

#ifdef CPU_AVX2 // 32-byte blocks (Haswell upwards)
    const __m256i vmask256 = _mm256_set1_epi32((int32_t)mask32);
    for (; de - ds >= 32; ds += 32) {
        __m256i v = _mm256_loadu_si256((const __m256i *)ds);
        _mm256_storeu_si256((__m256i *)ds, _mm256_xor_si256(v, vmask256));
    }
#endif
#ifdef CPU_SSE2 // 16-byte blocks
    const __m128i vmask128 = _mm_set1_epi32((int32_t)mask32);
    for (; de - ds >= 16; ds += 16) {
        __m128i v = _mm_loadu_si128((const __m128i *)ds);
        _mm_storeu_si128((__m128i *)ds, _mm_xor_si128(v, vmask128));
    }
#endif
    // 8-byte blocks (always available).
    for (; de - ds >= 8; ds += 8) {
        uint64_t w;
        memcpy(&w, ds, sizeof w);
        w ^= mask64;
        memcpy(ds, &w, sizeof w);
    }

    // Tail: remaining 0..7 bytes. The mask cycle is still in phase because
    // every block above advanced ds by a multiple of 4 bytes.
    for (size_t i = 0; ds < de; ds++, i++) {
        *ds ^= rb[i & 3u];
    }
}
PS:請忽略使用#ifdef而不是cpuid等來進行cpu標志檢測。
與手冊中的說法不同,大多數英特爾處理器實際上非常擅長處理未對齊的數據。由於您使用英特爾的編譯器內置函數(intrinsics)進行向量處理,我假設您可以訪問較新版本的 icc。
如果您無法自然地對齊數據,那么我擔心您所做的就是盡可能接近最佳性能。 在Xeon Phi(64字節向量寄存器)/未來更長的向量處理器上使代碼更易讀和可部署方面,我建議你開始使用Intel Cilk Plus 。
例:
// XOR `length` bytes at `d` with a cyclic 4-byte mask using Intel Cilk Plus
// array notation (icc only). NOTE(review): Cilk Plus is deprecated in recent
// icc releases — `#pragma omp simd` over a plain loop is the modern
// equivalent; confirm against your compiler version.
void intel_cilk_xor(uint32_t mask, uint8_t *d, size_t length) {
    // Handle the length % 4 head bytes one at a time. `*d ^= mask` XORs with
    // the LOW byte of mask, which on little-endian x86 is the first mask
    // byte in memory, so the mask must rotate RIGHT one byte to bring the
    // next byte of the cycle into the low position. (The original rotated
    // left, advancing the cycle backwards, and used `rold`, which is not a
    // valid AT&T mnemonic — the 32-bit rotate is `rorl`/`roll`.)
    while (length & 0x3) {
        *(d++) ^= mask;
        asm ("rorl $8, %0" : "+g" (mask) :: "cc"); // rotate dword one byte right
        length--;
    }
    // switch to 4 bytes per block
    // Fixed: original declared `uint32_t _d = d;` — the pointer type and the
    // cast were missing.
    uint32_t *_d = (uint32_t *)d;
    length >>= 2;
    // Intel Cilk Plus Array Notation
    // Should expand automatically to the best possible SIMD instructions
    // you are compiling for
    _d[0:length] ^= mask;
}
請注意,我沒有測試此代碼,因為我現在無法訪問英特爾編譯器。 如果你遇到問題,我可以在下周回到辦公室時重復一遍。
如果您更喜歡內在函數,那么正確使用預處理器宏可以顯着減輕您的生活:
// Per-target vector abstraction: block size, vector type, mask splat and
// XOR-in-place, selected at preprocessing time.
#if defined(__MIC__)
// intel Xeon Phi
#define VECTOR_BLOCKSIZE 64
// I do not remember the correct types/instructions right now
#error "TODO: MIC handling"
#elif defined(CPU_AVX2)
#define VECTOR_BLOCKSIZE 32
typedef __m256i my_vector_t;
#define VECTOR_LOAD_MASK(mask) _mm256_set1_epi32(mask)
// Fixed: original wrote _mm_store_si256 (wrong prefix) and called
// _mm256_set1_epi32 where the XOR intrinsic _mm256_xor_si256 was intended.
#define VECTOR_XOR(d, mask) _mm256_store_si256((d), _mm256_xor_si256(_mm256_load_si256(d), (mask)))
#elif defined(CPU_SSE2)
#define VECTOR_BLOCKSIZE 16
typedef __m128i my_vector_t;
// Fixed: there is no _mm128_* prefix — SSE2 intrinsics are _mm_* — and the
// XOR intrinsic _mm_xor_si128 was intended, not a set1.
#define VECTOR_LOAD_MASK(mask) _mm_set1_epi32(mask)
#define VECTOR_XOR(d, mask) _mm_store_si128((d), _mm_xor_si128(_mm_load_si128(d), (mask)))
#else
// Scalar fallback: 64-bit blocks, always available.
#define VECTOR_BLOCKSIZE 8
// Fixed: `(mask) << 32` on a uint32_t is UB (shift >= type width); widen to
// uint64_t before shifting so both halves carry the 4-byte pattern.
#define VECTOR_LOAD_MASK(mask) (((uint64_t)(mask) << 32) | (uint64_t)(mask))
#define VECTOR_XOR(d, mask) ((*(d)) ^= (mask))
typedef uint64_t my_vector_t;
#endif // was `#fi`, which is not a preprocessor directive
// Generic cyclic-4-byte-mask XOR built on the VECTOR_* abstraction: byte
// loop up to vector alignment, vector blocks, then a byte tail.
void optimized_xor_32( uint32_t mask, uint8_t *d, size_t length ) {
    size_t i;
    // there really is no point in having extra branches for different
    // vector lengths if they are executed at most once -> one byte at a
    // time until the block alignment is reached; branch prediction is your
    // friend here.
    // Fixed: `d & (VECTOR_BLOCKSIZE - 1)` does not compile for a pointer —
    // the address must go through uintptr_t.
    while (length && ((uintptr_t)d & (VECTOR_BLOCKSIZE - 1))) {
        *(d++) ^= mask; // XORs the LOW byte (first mask byte on little-endian)
        // Rotate the mask one byte RIGHT so the next cycle byte reaches the
        // low position. (Original used `rold`, an invalid AT&T mnemonic,
        // and rotated the wrong way for little-endian byte order.)
        asm ("rorl $8, %0" : "+g" (mask) :: "cc");
        length--;
    }
    my_vector_t *d_vector = (my_vector_t *)d;
    my_vector_t vector_mask = VECTOR_LOAD_MASK(mask);
    size_t vector_length = length / VECTOR_BLOCKSIZE; // compiler optimises this to a shift
    length &= VECTOR_BLOCKSIZE - 1; // remaining tail length
    for (i = 0; i < vector_length; i++) {
        VECTOR_XOR(d_vector + i, vector_mask);
    }
    // process the tail, continuing the mask cycle (still in phase: the
    // vector blocks are multiples of 4 bytes)
    d = (uint8_t *)(d_vector + i);
    for (i = 0; i < length; i++) {
        d[i] ^= mask;
        asm ("rorl $8, %0" : "+g" (mask) :: "cc");
    }
}
另請注意:您可能希望使用 x86 的 rotate 指令而不是位移來旋轉 mask:
// Rotate `var` left by `bits` bits via the x86 rol instruction. "+r" keeps
// var in a general register (read-modify-write), "c" pins the count to
// CL (the only register rol accepts as a variable count), and "cc" declares
// the flags clobbered. NOTE(review): rol rotates toward the HIGH byte —
// verify the direction matches the mask byte-order convention of the
// unmasking code before using this in place of the shift pair.
#define asm_rol(var, bits) asm ("rol %1, %0" : "+r" (var) : "c" ((uint8_t)bits) : "cc")
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.