I am interested in what the optimal way is to calculate the number of bits set in a byte. Here is my approach:
template< unsigned char byte > class BITS_SET
{
pub
#include <iostream>
#include <climits>   // for CHAR_BIT (most likely to be 8)
#include <cstring>   // for memset
#include <new>       // for std::bad_alloc
#include <algorithm> // for std::fill_n
// Sentinel marking a LUT entry as "not yet computed".
// -1 can never be a real answer, since a valid bit count is in [0, CHAR_BIT].
static const int DUMMY = -1;
// First approach: run the O(8) counting function on the first lookup for a
// given byte; every later lookup for that byte is O(1).
// Lazily-built ("on the fly") lookup table mapping a byte value to its
// number of set bits. Owns a heap array of 2^CHAR_BIT ints; copy and
// assignment are forbidden because the class manages that raw allocation.
class bitsInByteflyLUT
{
typedef unsigned char byte;
public:
bitsInByteflyLUT(); //CTOR - throws std::bad_alloc
~bitsInByteflyLUT(); //DTOR
// Returns the number of set bits in _byte, computing and caching it on
// the first request for that value.
int Get_bitsInByte(byte _byte);
private:
// CLASS DATA
int* flyLUT; // table of 2^CHAR_BIT entries; DUMMY means "not computed yet"
// PRIVATE FUNCTIONS
int bitsInByte(byte _byte);
// O(8) for finding how many bits are ON in a byte.
// answer can be between 0 to CHAR_BIT.
bitsInByteflyLUT(const bitsInByteflyLUT & _class); // COPY CTOR - forbidden
const bitsInByteflyLUT & operator= (const bitsInByteflyLUT& _class);
// ASSIGN OPERATOR - forbidden
};
// Allocates the lookup table (one slot per possible byte value) and marks
// every entry as "not yet computed". Throws std::bad_alloc on failure.
bitsInByteflyLUT::bitsInByteflyLUT()
{
    size_t nIndexes = 1 << CHAR_BIT; // 2^CHAR_BIT possible byte values
    // new[] already propagates std::bad_alloc on its own; the original
    // try { ... } catch (std::bad_alloc&) { throw; } added nothing.
    flyLUT = new int[nIndexes];
    // std::fill_n writes DUMMY as an int value. The original memset only
    // worked by accident: it fills bytes, which matches DUMMY solely
    // because -1 is all-0xFF bytes in two's complement.
    std::fill_n(flyLUT, nIndexes, DUMMY);
}
// Releases the lookup table allocated in the constructor.
bitsInByteflyLUT::~bitsInByteflyLUT()
{
delete[] flyLUT;
}
// Returns the number of set bits in _byte. The first request for a given
// value computes it in O(CHAR_BIT) and caches it; later requests are O(1).
int bitsInByteflyLUT::Get_bitsInByte(byte _byte)
{
    int& slot = flyLUT[_byte];
    if (slot == DUMMY) // first time this byte value is requested
    {
        slot = bitsInByte(_byte); // compute once, O(CHAR_BIT)
    }
    return slot; // cached answer, O(1)
}
// Counts the set bits in _byte by testing each of the CHAR_BIT positions.
// Result is in [0, CHAR_BIT].
int bitsInByteflyLUT::bitsInByte(byte _byte)
{
    int setCount = 0;
    for (int bit = 0; bit < CHAR_BIT; ++bit)
    {
        if ((_byte >> bit) & 1)
        {
            ++setCount;
        }
    }
    return setCount;
}
int main ()
{
using std::cout;
using std::endl;
bitsInByteflyLUT flut;
for (unsigned int i = 0; i < (1 << CHAR_BIT); i += 1)
{
cout << i << " " << flut.Get_bitsInByte(i) << endl;
}
return 0;
}