
Merge pull request #18 from davilizh/master

Optimized CUDA mining code by David Li
cl-refactor
Paweł Bylica committed 8 years ago (via GitHub)
commit 52ce838ddd
Changed files:
  1. ethminer/MinerAux.h (19 changes)
  2. libethash-cuda/dagger_shared.cuh (5 changes)
  3. libethash-cuda/dagger_shuffled.cuh (109 changes)
  4. libethash-cuda/ethash_cuda_miner.cpp (8 changes)
  5. libethash-cuda/ethash_cuda_miner.h (4 changes)
  6. libethash-cuda/ethash_cuda_miner_kernel.cu (26 changes)
  7. libethash-cuda/ethash_cuda_miner_kernel.h (3 changes)
  8. libethash-cuda/keccak.cuh (16 changes)
  9. libethcore/EthashCUDAMiner.cpp (7 changes)
  10. libethcore/EthashCUDAMiner.h (1 change)

ethminer/MinerAux.h (19 changes)

@@ -309,6 +309,21 @@ public:
}
}
}
else if (arg == "--cuda-parallel-hash" && i + 1 < argc)
{
try {
m_parallelHash = stol(argv[++i]);
if (m_parallelHash == 0 || m_parallelHash > 8)
{
throw BadArgument();
}
}
catch (...)
{
cerr << "Bad " << arg << " option: " << argv[i] << endl;
BOOST_THROW_EXCEPTION(BadArgument());
}
}
else if (arg == "--cuda-schedule" && i + 1 < argc)
{
string mode = argv[++i];
@@ -511,6 +526,8 @@ public:
m_dagCreateDevice
))
exit(1);
EthashCUDAMiner::setParallelHash(m_parallelHash);
#else
cerr << "Selected CUDA mining without having compiled with -DETHASHCUDA=1 or -DBUNDLE=cudaminer" << endl;
exit(1);
@@ -589,6 +606,7 @@ public:
<< " yield - Instruct CUDA to yield its thread when waiting for results from the device." << endl
<< " sync - Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the results from the device." << endl
<< " --cuda-devices <0 1 ..n> Select which CUDA GPUs to mine on. Default is to use all" << endl
<< " --cude-parallel-hash <1 2 ..8> Define how many hashes to calculate in a kernel, can be scaled to achive better performance. Default=4" << endl
#endif
;
}
@@ -1035,6 +1053,7 @@ private:
unsigned m_dagCreateDevice = 0;
/// Benchmarking params
unsigned m_benchmarkWarmup = 15;
unsigned m_parallelHash = 4;
unsigned m_benchmarkTrial = 3;
unsigned m_benchmarkTrials = 5;
unsigned m_benchmarkBlock = 0;
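Note: the new --cuda-parallel-hash flag accepts 1 through 8 (0 and anything above 8 throw BadArgument) and defaults to 4. Assuming ethminer's usual -U switch for CUDA mining, a tuned run might look like:

ethminer -U --cuda-parallel-hash 4

The value is forwarded to the CUDA backend through EthashCUDAMiner::setParallelHash(m_parallelHash) once the miner is configured, as shown in the second hunk above.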

libethash-cuda/dagger_shared.cuh (5 changes)

@@ -8,6 +8,7 @@ typedef union {
} compute_hash_share;
template <uint32_t _PARALLEL_HASH>
__device__ uint64_t compute_hash(
uint64_t nonce
)
@@ -65,7 +66,7 @@ __device__ uint64_t compute_hash(
__syncthreads();
}
// keccak_256(keccak_512(header..nonce) .. mix);
return keccak_f1600_final(state);
}
}

libethash-cuda/dagger_shuffled.cuh (109 changes)

@@ -2,12 +2,13 @@
#include "ethash_cuda_miner_kernel.h"
#include "cuda_helper.h"
__device__ uint64_t compute_hash(
template <uint32_t _PARALLEL_HASH>
__device__ __forceinline__ uint64_t compute_hash(
uint64_t nonce
)
{
// sha3_512(header .. nonce)
uint2 state[25];
uint2 state[12];
state[4] = vectorize(nonce);
@@ -17,32 +18,30 @@ __device__ uint64_t compute_hash(
const int thread_id = threadIdx.x & (THREADS_PER_HASH - 1);
const int mix_idx = thread_id & 3;
uint4 mix;
uint2 shuffle[8];
for (int i = 0; i < THREADS_PER_HASH; i++)
for (int i = 0; i < THREADS_PER_HASH; i += _PARALLEL_HASH)
{
uint4 mix[_PARALLEL_HASH];
uint32_t offset[_PARALLEL_HASH];
uint32_t init0[_PARALLEL_HASH];
// share init among threads
for (int j = 0; j < 8; j++) {
shuffle[j].x = __shfl(state[j].x, i, THREADS_PER_HASH);
shuffle[j].y = __shfl(state[j].y, i, THREADS_PER_HASH);
}
// ugly but avoids local reads/writes
if (mix_idx < 2) {
if (mix_idx == 0)
mix = vectorize2(shuffle[0], shuffle[1]);
else
mix = vectorize2(shuffle[2], shuffle[3]);
}
else {
if (mix_idx == 2)
mix = vectorize2(shuffle[4], shuffle[5]);
else
mix = vectorize2(shuffle[6], shuffle[7]);
for (int p = 0; p < _PARALLEL_HASH; p++)
{
uint2 shuffle[8];
for (int j = 0; j < 8; j++)
{
shuffle[j].x = __shfl(state[j].x, i+p, THREADS_PER_HASH);
shuffle[j].y = __shfl(state[j].y, i+p, THREADS_PER_HASH);
}
switch (mix_idx)
{
case 0: mix[p] = vectorize2(shuffle[0], shuffle[1]); break;
case 1: mix[p] = vectorize2(shuffle[2], shuffle[3]); break;
case 2: mix[p] = vectorize2(shuffle[4], shuffle[5]); break;
case 3: mix[p] = vectorize2(shuffle[6], shuffle[7]); break;
}
init0[p] = __shfl(shuffle[0].x, 0, THREADS_PER_HASH);
}
uint32_t init0 = __shfl(shuffle[0].x, 0, THREADS_PER_HASH);
for (uint32_t a = 0; a < ACCESSES; a += 4)
{
@@ -50,37 +49,49 @@ __device__ uint64_t compute_hash(
for (uint32_t b = 0; b < 4; b++)
{
if (thread_id == t)
{
shuffle[0].x = fnv(init0 ^ (a + b), ((uint32_t *)&mix)[b]) % d_dag_size;
for (int p = 0; p < _PARALLEL_HASH; p++)
{
offset[p] = fnv(init0[p] ^ (a + b), ((uint32_t *)&mix[p])[b]) % d_dag_size;
offset[p] = __shfl(offset[p], t, THREADS_PER_HASH);
}
shuffle[0].x = __shfl(shuffle[0].x, t, THREADS_PER_HASH);
mix = fnv4(mix, d_dag[shuffle[0].x].uint4s[thread_id]);
#pragma unroll
for (int p = 0; p < _PARALLEL_HASH; p++)
{
mix[p] = fnv4(mix[p], d_dag[offset[p]].uint4s[thread_id]);
}
}
}
uint32_t thread_mix = fnv_reduce(mix);
for (int p = 0; p < _PARALLEL_HASH; p++)
{
uint2 shuffle[4];
uint32_t thread_mix = fnv_reduce(mix[p]);
// update mix across threads
shuffle[0].x = __shfl(thread_mix, 0, THREADS_PER_HASH);
shuffle[0].y = __shfl(thread_mix, 1, THREADS_PER_HASH);
shuffle[1].x = __shfl(thread_mix, 2, THREADS_PER_HASH);
shuffle[1].y = __shfl(thread_mix, 3, THREADS_PER_HASH);
shuffle[2].x = __shfl(thread_mix, 4, THREADS_PER_HASH);
shuffle[2].y = __shfl(thread_mix, 5, THREADS_PER_HASH);
shuffle[3].x = __shfl(thread_mix, 6, THREADS_PER_HASH);
shuffle[3].y = __shfl(thread_mix, 7, THREADS_PER_HASH);
if (i == thread_id) {
//move mix into state:
state[8] = shuffle[0];
state[9] = shuffle[1];
state[10] = shuffle[2];
state[11] = shuffle[3];
shuffle[0].x = __shfl(thread_mix, 0, THREADS_PER_HASH);
shuffle[0].y = __shfl(thread_mix, 1, THREADS_PER_HASH);
shuffle[1].x = __shfl(thread_mix, 2, THREADS_PER_HASH);
shuffle[1].y = __shfl(thread_mix, 3, THREADS_PER_HASH);
shuffle[2].x = __shfl(thread_mix, 4, THREADS_PER_HASH);
shuffle[2].y = __shfl(thread_mix, 5, THREADS_PER_HASH);
shuffle[3].x = __shfl(thread_mix, 6, THREADS_PER_HASH);
shuffle[3].y = __shfl(thread_mix, 7, THREADS_PER_HASH);
if ((i+p) == thread_id) {
//move mix into state:
state[8] = shuffle[0];
state[9] = shuffle[1];
state[10] = shuffle[2];
state[11] = shuffle[3];
}
}
}
// keccak_256(keccak_512(header..nonce) .. mix);
return keccak_f1600_final(state);
}
}
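The heart of the change: rather than pipelining one hash through the warp per iteration, the loop now advances _PARALLEL_HASH hashes at a time, each lane broadcasting its Keccak state to its group with warp shuffles so mix[] never leaves registers. Below is a minimal, self-contained sketch of that broadcast pattern (illustrative only; kernel and variable names are invented, and it uses __shfl_sync, which replaces the plain __shfl of this tree on CUDA 9+):

#include <cstdio>
#include <cuda_runtime.h>

#define THREADS_PER_HASH 8

__global__ void shuffle_broadcast_demo(int* out)
{
    int v = threadIdx.x;  // each lane's private "state" register
    int sum = 0;
    // Every lane reads each group member's register through a width-8
    // shuffle, the same way compute_hash gathers state[j] from lane i+p.
    for (int i = 0; i < THREADS_PER_HASH; ++i)
        sum += __shfl_sync(0xFFFFFFFFu, v, i, THREADS_PER_HASH);
    out[threadIdx.x] = sum;  // all 8 lanes of a group agree on the result
}

int main()
{
    int* d = nullptr;
    cudaMalloc(&d, 32 * sizeof(int));
    shuffle_broadcast_demo<<<1, 32>>>(d);
    int h[32];
    cudaMemcpy(h, d, sizeof(h), cudaMemcpyDeviceToHost);
    printf("group 0 sum = %d\n", h[0]);  // 0+1+...+7 = 28
    cudaFree(d);
    return 0;
}

Note how offset[p] is also re-broadcast from lane t before the DAG load, so every lane of the group issues the same row address — the memory access pattern is unchanged from the single-hash version, only batched.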

libethash-cuda/ethash_cuda_miner.cpp (8 changes)

@@ -180,7 +180,13 @@ bool ethash_cuda_miner::configureGPU(
}
}
void ethash_cuda_miner::setParallelHash(unsigned _parallelHash)
{
m_parallelHash = _parallelHash;
}
unsigned ethash_cuda_miner::s_extraRequiredGPUMem;
unsigned ethash_cuda_miner::m_parallelHash = 4;
unsigned ethash_cuda_miner::s_blockSize = ethash_cuda_miner::c_defaultBlockSize;
unsigned ethash_cuda_miner::s_gridSize = ethash_cuda_miner::c_defaultGridSize;
unsigned ethash_cuda_miner::s_numStreams = ethash_cuda_miner::c_defaultNumStreams;
@@ -363,7 +369,7 @@ void ethash_cuda_miner::search(uint8_t const* header, uint64_t target, search_ho
for (unsigned int j = 0; j < found_count; j++)
nonces[j] = nonce_base + buffer[j + 1];
}
run_ethash_search(s_gridSize, s_blockSize, m_sharedBytes, stream, buffer, m_current_nonce);
run_ethash_search(s_gridSize, s_blockSize, m_sharedBytes, stream, buffer, m_current_nonce, m_parallelHash);
if (m_current_index >= s_numStreams)
{
exit = found_count && hook.found(nonces, found_count);

libethash-cuda/ethash_cuda_miner.h (4 changes)

@@ -34,6 +34,7 @@ public:
unsigned _scheduleFlag,
uint64_t _currentBlock
);
static void setParallelHash(unsigned _parallelHash);
bool init(ethash_light_t _light, uint8_t const* _lightData, uint64_t _lightSize, unsigned _deviceId, bool _cpyToHost, volatile void** hostDAG);
@@ -72,4 +73,5 @@ private:
/// GPU memory required for other things, like window rendering, etc.
/// User can set it via the --cl-extragpu-mem argument.
static unsigned s_extraRequiredGPUMem;
};
static unsigned m_parallelHash;
};

libethash-cuda/ethash_cuda_miner_kernel.cu (26 changes)

@@ -16,24 +16,20 @@
#if __CUDA_ARCH__ < SHUFFLE_MIN_VER
#include "keccak_u64.cuh"
#include "dagger_shared.cuh"
#define TPB 128
#define BPSM 4
#else
#include "keccak.cuh"
#include "dagger_shuffled.cuh"
#define TPB 896
#define BPSM 1
#endif
template <uint32_t _PARALLEL_HASH>
__global__ void
__launch_bounds__(TPB, BPSM)
ethash_search(
volatile uint32_t* g_output,
uint64_t start_nonce
)
{
uint32_t const gid = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t hash = compute_hash(start_nonce + gid);
uint64_t hash = compute_hash<_PARALLEL_HASH>(start_nonce + gid);
if (cuda_swab64(hash) > d_target) return;
uint32_t index = atomicInc(const_cast<uint32_t*>(g_output), SEARCH_RESULT_BUFFER_SIZE - 1) + 1;
g_output[index] = gid;
@@ -45,18 +41,30 @@ void run_ethash_search(
uint32_t sharedbytes,
cudaStream_t stream,
volatile uint32_t* g_output,
uint64_t start_nonce
uint64_t start_nonce,
uint32_t parallelHash
)
{
ethash_search << <blocks, threads, sharedbytes, stream >> >(g_output, start_nonce);
switch (parallelHash)
{
case 1: ethash_search <1> <<<blocks, threads, sharedbytes, stream >>>(g_output, start_nonce); break;
case 2: ethash_search <2> <<<blocks, threads, sharedbytes, stream >>>(g_output, start_nonce); break;
case 3: ethash_search <3> <<<blocks, threads, sharedbytes, stream >>>(g_output, start_nonce); break;
case 4: ethash_search <4> <<<blocks, threads, sharedbytes, stream >>>(g_output, start_nonce); break;
case 5: ethash_search <5> <<<blocks, threads, sharedbytes, stream >>>(g_output, start_nonce); break;
case 6: ethash_search <6> <<<blocks, threads, sharedbytes, stream >>>(g_output, start_nonce); break;
case 7: ethash_search <7> <<<blocks, threads, sharedbytes, stream >>>(g_output, start_nonce); break;
case 8: ethash_search <8> <<<blocks, threads, sharedbytes, stream >>>(g_output, start_nonce); break;
default: ethash_search <4> <<<blocks, threads, sharedbytes, stream >>>(g_output, start_nonce); break;
}
CUDA_SAFE_CALL(cudaGetLastError());
}
#define ETHASH_DATASET_PARENTS 256
#define NODE_WORDS (64/4)
__global__ void
__launch_bounds__(128, 7)
ethash_calculate_dag_item(uint32_t start)
{
uint32_t const node_index = start + blockIdx.x * blockDim.x + threadIdx.x;
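Making the hash count a template parameter (_PARALLEL_HASH) instead of a kernel argument lets the compiler fully unroll the per-hash loops; the cost is the switch above, which maps the runtime --cuda-parallel-hash value onto one of eight instantiations. A toy version of the same dispatch pattern (hypothetical names, not project code):

#include <cstdio>
#include <cuda_runtime.h>

// N is a compile-time constant, so #pragma unroll can really unroll;
// a plain kernel argument could not guarantee that.
template <int N>
__global__ void unrolled_work(int* out)
{
    int acc = 0;
#pragma unroll
    for (int p = 0; p < N; ++p)
        acc += (p + 1) * (int)threadIdx.x;
    out[threadIdx.x] = acc;
}

// Host-side bridge from a runtime value to a template instantiation,
// mirroring what run_ethash_search does with parallelHash.
void launch_unrolled(int n, int* out)
{
    switch (n)
    {
    case 1: unrolled_work<1><<<1, 32>>>(out); break;
    case 2: unrolled_work<2><<<1, 32>>>(out); break;
    case 8: unrolled_work<8><<<1, 32>>>(out); break;
    default: unrolled_work<4><<<1, 32>>>(out); break;
    }
}

int main()
{
    int* d = nullptr;
    cudaMalloc(&d, 32 * sizeof(int));
    launch_unrolled(4, d);
    int h[32];
    cudaMemcpy(h, d, sizeof(h), cudaMemcpyDeviceToHost);
    printf("out[1] = %d\n", h[1]);  // (1+2+3+4)*1 = 10
    cudaFree(d);
    return 0;
}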

libethash-cuda/ethash_cuda_miner_kernel.h (3 changes)

@@ -52,7 +52,8 @@ void run_ethash_search(
uint32_t sharedbytes,
cudaStream_t stream,
volatile uint32_t* g_output,
uint64_t start_nonce
uint64_t start_nonce,
uint32_t parallelHash
);
void ethash_generate_dag(

libethash-cuda/keccak.cuh (16 changes)

@@ -25,10 +25,13 @@ uint2 chi(const uint2 a, const uint2 b, const uint2 c) {
return a ^ (~b) & c;
}
__device__ __forceinline__ void keccak_f1600_init(uint2* s)
__device__ __forceinline__ void keccak_f1600_init(uint2* state)
{
uint2 s[25];
uint2 t[5], u, v;
s[4] = state[4];
devectorize2(d_header.uint4s[0], s[0], s[1]);
devectorize2(d_header.uint4s[1], s[2], s[3]);
@@ -328,12 +331,19 @@ __device__ __forceinline__ void keccak_f1600_init(uint2* s)
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[23]);
for(int i = 0; i < 12; ++i)
state[i] = s[i];
}
__device__ __forceinline__ uint64_t keccak_f1600_final(uint2* s)
__device__ __forceinline__ uint64_t keccak_f1600_final(uint2* state)
{
uint2 s[25];
uint2 t[5], u, v;
for (int i = 0; i < 12; ++i)
s[i] = state[i];
for (uint32_t i = 12; i < 25; i++)
{
s[i] = make_uint2(0, 0);
@@ -774,4 +784,4 @@ __device__ __forceinline__ void SHA3_512(uint2* s) {
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[23]);
}
}
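These keccak changes pair with the uint2 state[12] shrink in dagger_shuffled.cuh: keccak_f1600_init and keccak_f1600_final now keep the full 25-word Keccak state in a function-local array and exchange only the first 12 uint2 words with the caller. Init copies s[0..11] out after its permutation; final copies words 0..11 back in and reinitializes words 12..24 (the hunk shows them zeroed) before the final permutation. Between the two calls only words 0..11 are live — the mix phase overwrites state[8..11] — so each thread carries 13 fewer uint2 values of state (about 26 fewer 32-bit registers, assuming the compiler keeps the state in registers), which plausibly enables the larger TPB of the shuffled kernel.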

libethcore/EthashCUDAMiner.cpp (7 changes)

@@ -261,4 +261,9 @@ bool EthashCUDAMiner::configureGPU(
return true;
}
#endif
void EthashCUDAMiner::setParallelHash(unsigned _parallelHash)
{
ethash_cuda_miner::setParallelHash(_parallelHash);
}
#endif

libethcore/EthashCUDAMiner.h (1 change)

@@ -51,6 +51,7 @@ class EthashCUDAHook;
static std::string platformInfo();
static unsigned getNumDevices();
static void listDevices();
static void setParallelHash(unsigned _parallelHash);
static bool configureGPU(
unsigned _blockSize,
unsigned _gridSize,
