I'm working on a Vec3
template class that (not surprisingly) stores a vector of three elements of any type. Here's the class header with the irrelevant bits stripped:
template <typename T>
class Vec3
{
private:
T data[3] = {0};
public:
inline Vec3 (const T & d0, const T & d1, const T & d2) noexcept;
inline Vec3 (const Vec3 & other) noexcept;
inline void operator = (const Vec3 & other) noexcept;
inline Vec3 operator - (const Vec3 & other) noexcept;
inline bool operator == (const Vec3 & other) const noexcept;
};
typedef Vec3<float> Vec3f;
template <typename T>
inline Vec3<T> :: Vec3 (const T & d0, const T & d1, const T & d2) noexcept
{
data[0] = d0;
data[1] = d1;
data[2] = d2;
}
template <typename T>
inline Vec3<T> :: Vec3 (const Vec3<T> & other) noexcept
{
*this = other;
}
template <typename T>
inline void Vec3<T> :: operator = (const Vec3<T> & other) noexcept
{
data[0] = other.data[0];
data[1] = other.data[1];
data[2] = other.data[2];
}
template <typename T>
inline Vec3<T> Vec3<T> :: operator - (const Vec3<T> & other) noexcept
{
return Vec3<T>(
data[0] - other.data[0],
data[1] - other.data[1],
data[2] - other.data[2]
);
}
template <typename T>
inline bool Vec3<T> :: operator == (const Vec3<T> & other) const noexcept
{
if (std::is_same_v<T, double>)
{
return
DoubleEq(data[0], other.data[0]) &&
DoubleEq(data[1], other.data[1]) &&
DoubleEq(data[2], other.data[2]);
}
else if (std::is_same_v<T, float>)
{
return
FloatEq(data[0], other.data[0]) &&
FloatEq(data[1], other.data[1]) &&
FloatEq(data[2], other.data[2]);
}
else
{
return
data[0] == other.data[0] &&
data[1] == other.data[1] &&
data[2] == other.data[2];
}
}
The epsilon comparison functions are defined here:
inline bool DoubleEq (const double a, const double b)
{
return (fabs(a - b) < std::numeric_limits<double>::epsilon());
}
inline bool FloatEq (const float a, const float b)
{
return (fabs(a - b) < std::numeric_limits<float>::epsilon());
}
I'm testing it with the GoogleTest suite and am having trouble with the following snippet:
Vec3f vecf1 = {32.1432, 768.1343, 9789.1345};
Vec3f vecf2 = {643.5348, 75.4232, 20.4701};
Vec3f vecf3 = vecf1 - vecf2;
EXPECT_TRUE(vecf1 == Vec3f(32.1432, 768.1343, 9789.1345)); // Passes
EXPECT_TRUE(vecf2 == Vec3f(643.5348, 75.4232, 20.4701)); // Passes
EXPECT_TRUE(vecf3 == Vec3f(32.1432-643.5348, 768.1343-75.4232, 9789.1345-20.4701)); // Fails :(
This sould be straightforward because the numbers used in the third test to build the comparison vector are exactly the same as the ones in vecf1
and vecf2
. However, printing vec3f
and the raw operations yields different values:
std::cout << vecf3[0] << " " << vecf3[1] << " " << vecf3[2] << std::endl;
std::cout << 32.1432-643.5348 << " " << 768.1343-75.4232 << " " << 9789.1345-20.4701 << std::endl;
-611.392 692.711 9768.67
-611.392 692.711 9768.66
What's more, I've tried debugging the program and have found that the values passed to Vec3<float>::operator-(Vec3<float> const&)
differ from the ones written in the code. That's expected from floating point arithmetic loss of significance, but I don't know why this only happens then the floats are passed to a function:
119 inline Vec3<T> Vec3<T> :: operator - (const Vec3<T> & other) noexcept
(gdb) p this
$1 = (Vec3<float> * const) 0x5555555899a0
(gdb) p *this
$2 = {data = {32.1431999, 768.134277, 9789.13477}}
(gdb) p other
$3 = (const Vec3<float> &) @0x5555555899ac: {data = {643.53479, 75.4232025, 20.4701004}}
(gdb) n
124 data[2] - other.data[2]
(gdb)
123 data[1] - other.data[1],
(gdb)
122 data[0] - other.data[0],
(gdb) n
125 );
(gdb) n
126 }
(gdb) p data[0] - other.data[0]
$4 = -611.391602
(gdb) p data[1] - other.data[1]
$5 = 692.71106
(gdb) p data[2] - other.data[2]
$6 = 9768.66504
(gdb) n
Vec3Test_Subtraction_NoSideEffects_Test::TestBody (this=0x5555555898d0) at tests/vec3.hpp:218
218 std::cout << vecf3[0] << " " << vecf3[1] << " " << vecf3[2] << std::endl;
(gdb)
-611.392 692.711 9768.67
219 std::cout << 32.1432-643.5348 << " " << 768.1343-75.4232 << " " << 9789.1345-20.4701 << std::endl;
(gdb)
-611.392 692.711 9768.66
As a sanity check, Valgrind returns no errors:
1 FAILED TEST
==47278==
==47278== HEAP SUMMARY:
==47278== in use at exit: 0 bytes in 0 blocks
==47278== total heap usage: 347 allocs, 347 frees, 138,501 bytes allocated
==47278==
==47278== All heap blocks were freed -- no leaks are possible
==47278==
==47278== For lists of detected and suppressed errors, rerun with: -s
==47278== ERROR SUMMARY: 0 errors from 0 contexts (suppressed: 0 from 0)
Why is my code evaluating floats differently in different situations? What am I doing wrong?
I see a couple of issues here:
Epsilon is a one bit error, but one bit at around 1. You should scale espilon to match the scale of your result.
The 1 bit error (1 epsilon) is good for 1 addition, the more complex the operation, the larger the bit error. So you may want to be able to specify it for re-use in other tests.
For example:
#include <cmath>
template <typename _Float>
inline bool FloatEq (_Float a, _Float b, _Float bit_margin = _Float(1)) noexcept
{
const _Float epsilon = std::max(std::abs(a), std::abs(b)) * std::numeric_limits<_Float>::epsilon();
return std::abs(b - a) < (bit_margin * epsilon);
}
The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address.Any question please contact:yoyou2525@163.com.