#ifndef VSMC_RNG_MKL_HPP
#define VSMC_RNG_MKL_HPP

// ... (includes and the opening of namespace vsmc / vsmc::internal elided)

// Offset policy for BRNGs without selectable sub-streams: set/get are no-ops
    static void set(MKL_INT) {}
    static constexpr MKL_INT get() { return 0; }

// Offset policy for BRNG families that select a sub-stream by adding an
// offset (modulo MaxOffset) to the base BRNG index
template <MKL_INT MaxOffset>
// ... inside void set(MKL_INT n):
        offset_ = n % MaxOffset;

    MKL_INT get() const { return offset_; }

// MKLResultTypeTrait<64>: 64-bit engines produce unsigned MKL_INT64 values
    using type = unsigned MKL_INT64;
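// Illustrative sketch, not part of the header: the trait above maps the Bits
// parameter to the raw output type of the underlying generator. The 64-bit
// mapping is shown in the code; the 32-bit mapping to plain `unsigned` is an
// assumption based on the 32-bit uniform-bits interface taking an
// `unsigned *` output buffer.
//
//     static_assert(
//         std::is_same<internal::MKLResultType<32>, unsigned>::value,
//         "32-bit engines yield unsigned 32-bit integers");
//     static_assert(
//         std::is_same<internal::MKLResultType<64>, unsigned MKL_INT64>::value,
//         "64-bit engines yield unsigned 64-bit integers");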
// Discard by generating raw bits into a scratch buffer and dropping them,
// one block at a time
template <MKL_INT BRNG, int Bits>
// ... inside static void eval(MKLStream &stream, long long nskip):
    std::array<MKLResultType<Bits>, 1000> buffer;
    const MKL_INT k = static_cast<MKL_INT>(buffer.size());
    // ... generate and drop whole blocks of k values, then the remainder:
        stream, static_cast<MKL_INT>(nskip), buffer.data());

template <MKL_INT BRNG, int Bits>
// ... (dispatching discard implementation elided)
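// Sketch of the generate-and-drop strategy above (illustrative; the helper
// name `generate_bits` is hypothetical and stands in for whatever routine
// fills the buffer with raw variates, and the loop structure is an
// assumption). Values are consumed in blocks of at most 1000, so only a
// small scratch buffer is needed:
//
//     template <int Bits>
//     inline void discard_by_generation(MKLStream &stream, long long nskip)
//     {
//         if (nskip <= 0)
//             return;
//         std::array<MKLResultType<Bits>, 1000> buffer;
//         const MKL_INT k = static_cast<MKL_INT>(buffer.size());
//         while (nskip > k) {
//             generate_bits<Bits>(stream, k, buffer.data());
//             nskip -= k;
//         }
//         generate_bits<Bits>(
//             stream, static_cast<MKL_INT>(nskip), buffer.data());
//     }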
// MKLEngine: C++11-style RNG engine backed by an MKL VSL stream
template <MKL_INT BRNG, int Bits>
class MKLEngine
{
    public:
    using result_type = internal::MKLResultType<Bits>;
    explicit MKLEngine(MKL_UINT s = 1) : index_(M_) { seed(s); }

    template <typename SeedSeq>
    explicit MKLEngine(SeedSeq &seq,
        typename std::enable_if<internal::is_seed_seq<SeedSeq, MKL_UINT,
            MKLEngine<BRNG, Bits>>::value>::type * = nullptr)
        : index_(M_)
    {
        seed(seq);
    }

    MKLEngine(MKL_UINT s, MKL_INT offset) { seed(s, offset); }

    template <typename SeedSeq>
    void seed(SeedSeq &seq,
        typename std::enable_if<
            internal::is_seed_seq<SeedSeq, MKL_UINT>::value>::type * = nullptr)
    {
        MKL_UINT s = 0;
        seq.generate(&s, &s + 1);
        // ...
    }

    void seed(MKL_UINT s, MKL_INT offset)
    {
        // ... set the sub-stream offset `off` for this BRNG, then
        stream_.reset(BRNG + off.get(), s);
        // ...
    }

    result_type operator()()
    {
        // ... refill buffer_ with M_ raw values once it is exhausted:
        //         stream_, static_cast<MKL_INT>(M_), buffer_.data());
        return buffer_[index_++];
    }

    void operator()(std::size_t n, result_type *r)
    {
        // ... generate n raw values directly into r:
        //         stream_, static_cast<MKL_INT>(n), r);
    }

    // ... (discard(long long nskip) and stream() accessors elided)
    static constexpr result_type min VSMC_MNE()
    {
        return std::numeric_limits<result_type>::min VSMC_MNE();
    }

    static constexpr result_type max VSMC_MNE()
    {
        return std::numeric_limits<result_type>::max VSMC_MNE();
    }

    friend bool operator==(
        const MKLEngine<BRNG, Bits> &eng1, const MKLEngine<BRNG, Bits> &eng2)
    {
        if (eng1.stream_.get_brng() != eng2.stream_.get_brng())
            return false;
        std::size_t n = static_cast<std::size_t>(eng1.stream_.get_size());
        // ... save both stream states into byte buffers s1, s2 of size n
        eng1.stream_.save_m(s1.data());
        eng2.stream_.save_m(s2.data());
        // ... compare the saved states, then the cached values
        if (eng1.buffer_ != eng2.buffer_)
            return false;
        if (eng1.index_ != eng2.index_)
            return false;
        return true;
    }

    friend bool operator!=(
        const MKLEngine<BRNG, Bits> &eng1, const MKLEngine<BRNG, Bits> &eng2)
    {
        return !(eng1 == eng2);
    }

    template <typename CharT, typename Traits>
    friend std::basic_ostream<CharT, Traits> &operator<<(
        std::basic_ostream<CharT, Traits> &os,
        const MKLEngine<BRNG, Bits> &eng)
    {
        os << eng.stream_.get_brng() << ' ';
        std::size_t n = static_cast<std::size_t>(eng.stream_.get_size());
        // ... save the stream state into a word buffer s of n elements
        eng.stream_.save_m(reinterpret_cast<char *>(s.data()));
        for (std::size_t i = 0; i != n; ++i)
            os << s[i] << ' ';
        os << eng.buffer_ << ' ';
        // ... write index_ and return os
    }

    template <typename CharT, typename Traits>
    friend std::basic_istream<CharT, Traits> &operator>>(
        std::basic_istream<CharT, Traits> &is, MKLEngine<BRNG, Bits> &eng)
    {
        // ... read into temporaries so a failed read leaves eng unchanged
        std::array<result_type, M_> buffer;
        std::size_t index = 0;
        is >> std::ws >> brng;
        // ...
        stream.reset(brng, 1);
        // ...
        std::size_t n = static_cast<std::size_t>(eng.stream_.get_size());
        for (std::size_t i = 0; i != n; ++i)
            is >> std::ws >> s[i];
        // ...
        stream.load_m(reinterpret_cast<const char *>(s.data()));
        // ...
        is >> std::ws >> buffer;
        is >> std::ws >> index;
        // ... on success, commit the temporaries, read index_, and return is
        eng.stream_ = stream;
        eng.buffer_ = buffer;
    }

    private:
    static constexpr std::size_t M_ = 1000;
    MKLStream stream_;
    std::array<result_type, M_> buffer_;
    std::size_t index_;
}; // class MKLEngine
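// Usage sketch, not part of the header. Assuming the vsmc namespace, the
// conventional header path, and MKL's VSL_BRNG_MT19937 constant, a 32-bit
// MKLEngine behaves like a standard <random> engine and can be serialized
// through the stream operators defined above:
//
//     #include <vsmc/rng/mkl.hpp>  // assumed header path
//     #include <sstream>
//
//     vsmc::MKLEngine<VSL_BRNG_MT19937, 32> rng(101);
//     auto u = rng();      // one raw 32-bit value from the internal buffer
//     rng.discard(1000);   // skip 1000 raw values in the stream
//
//     std::stringstream ss;
//     ss << rng;           // save BRNG index, stream state, buffer, index
//     ss >> rng;           // restore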
// ... (engine type aliases elided)

#if INTEL_MKL_VERSION >= 110300
// ... (definitions requiring MKL 11.3 or newer)
#endif // INTEL_MKL_VERSION >= 110300

template <MKL_INT BRNG, int Bits>
// ... (elided)
// Distribution overloads: when the engine is an MKLEngine, the generic
// distribution free functions dispatch directly to the MKL stream

template <MKL_INT BRNG, int Bits>
inline void bernoulli_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, int *r, double p)
{
    rng.stream().bernoulli(static_cast<MKL_INT>(n), r, p);
}

template <MKL_INT BRNG, int Bits>
inline void uniform_real_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, float *r, float a, float b)
{
    rng.stream().uniform(static_cast<MKL_INT>(n), r, a, b);
}

template <MKL_INT BRNG, int Bits>
inline void uniform_real_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, double *r, double a, double b)
{
    rng.stream().uniform(static_cast<MKL_INT>(n), r, a, b);
}

template <MKL_INT BRNG, int Bits>
inline void u01_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, float *r)
{
    rng.stream().uniform(static_cast<MKL_INT>(n), r, 0, 1);
}

template <MKL_INT BRNG, int Bits>
inline void u01_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, double *r)
{
    rng.stream().uniform(static_cast<MKL_INT>(n), r, 0, 1);
}

template <MKL_INT BRNG, int Bits>
inline void normal_distribution(MKLEngine<BRNG, Bits> &rng, std::size_t n,
    float *r, float mean, float stddev)
{
    rng.stream().gaussian(static_cast<MKL_INT>(n), r, mean, stddev);
}

template <MKL_INT BRNG, int Bits>
inline void normal_distribution(MKLEngine<BRNG, Bits> &rng, std::size_t n,
    double *r, double mean, double stddev)
{
    rng.stream().gaussian(static_cast<MKL_INT>(n), r, mean, stddev);
}

template <MKL_INT BRNG, int Bits>
inline void exponential_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, float *r, float lambda)
{
    rng.stream().exponential(static_cast<MKL_INT>(n), r, 0, 1 / lambda);
}

template <MKL_INT BRNG, int Bits>
inline void exponential_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, double *r, double lambda)
{
    rng.stream().exponential(static_cast<MKL_INT>(n), r, 0, 1 / lambda);
}

template <MKL_INT BRNG, int Bits>
inline void laplace_distribution(MKLEngine<BRNG, Bits> &rng, std::size_t n,
    float *r, float location, float scale)
{
    rng.stream().laplace(static_cast<MKL_INT>(n), r, location, scale);
}

template <MKL_INT BRNG, int Bits>
inline void laplace_distribution(MKLEngine<BRNG, Bits> &rng, std::size_t n,
    double *r, double location, double scale)
{
    rng.stream().laplace(static_cast<MKL_INT>(n), r, location, scale);
}

template <MKL_INT BRNG, int Bits>
inline void weibull_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, float *r, float a, float b)
{
    rng.stream().weibull(static_cast<MKL_INT>(n), r, a, 0, b);
}

template <MKL_INT BRNG, int Bits>
inline void weibull_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, double *r, double a, double b)
{
    rng.stream().weibull(static_cast<MKL_INT>(n), r, a, 0, b);
}

template <MKL_INT BRNG, int Bits>
inline void cauchy_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, float *r, float a, float b)
{
    rng.stream().cauchy(static_cast<MKL_INT>(n), r, a, b);
}

template <MKL_INT BRNG, int Bits>
inline void cauchy_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, double *r, double a, double b)
{
    rng.stream().cauchy(static_cast<MKL_INT>(n), r, a, b);
}

template <MKL_INT BRNG, int Bits>
// ... (rayleigh_distribution, float overload; body elided)

template <MKL_INT BRNG, int Bits>
// ... (rayleigh_distribution, double overload; body elided)

template <MKL_INT BRNG, int Bits>
inline void lognormal_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, float *r, float m, float s)
{
    rng.stream().lognormal(static_cast<MKL_INT>(n), r, m, s, 0, 1);
}

template <MKL_INT BRNG, int Bits>
inline void lognormal_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, double *r, double m, double s)
{
    rng.stream().lognormal(static_cast<MKL_INT>(n), r, m, s, 0, 1);
}

template <MKL_INT BRNG, int Bits>
inline void extreme_value_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, float *r, float a, float b)
{
    rng.stream().gumbel(static_cast<MKL_INT>(n), r, a, b);
    // ...
}

template <MKL_INT BRNG, int Bits>
inline void extreme_value_distribution(
    MKLEngine<BRNG, Bits> &rng, std::size_t n, double *r, double a, double b)
{
    rng.stream().gumbel(static_cast<MKL_INT>(n), r, a, b);
    // ...
}

template <MKL_INT BRNG, int Bits>
inline void gamma_distribution(MKLEngine<BRNG, Bits> &rng, std::size_t n,
    float *r, float alpha, float beta)
{
    rng.stream().gamma(static_cast<MKL_INT>(n), r, alpha, 0, beta);
}

template <MKL_INT BRNG, int Bits>
inline void gamma_distribution(MKLEngine<BRNG, Bits> &rng, std::size_t n,
    double *r, double alpha, double beta)
{
    rng.stream().gamma(static_cast<MKL_INT>(n), r, alpha, 0, beta);
}

template <MKL_INT BRNG, int Bits>
inline void beta_distribution(MKLEngine<BRNG, Bits> &rng, std::size_t n,
    float *r, float alpha, float beta)
{
    rng.stream().beta(static_cast<MKL_INT>(n), r, alpha, beta, 0, 1);
}

template <MKL_INT BRNG, int Bits>
inline void beta_distribution(MKLEngine<BRNG, Bits> &rng, std::size_t n,
    double *r, double alpha, double beta)
{
    rng.stream().beta(static_cast<MKL_INT>(n), r, alpha, beta, 0, 1);
}
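// Usage sketch, not part of the header. With these overloads, generic code
// that calls the distribution free functions dispatches straight to the MKL
// stream when the engine is an MKLEngine (names below assume the vsmc
// namespace and MKL's VSL_BRNG_MT19937 constant):
//
//     vsmc::MKLEngine<VSL_BRNG_MT19937, 32> rng(101);
//     std::vector<double> r(1000);
//     vsmc::normal_distribution(rng, r.size(), r.data(), 0.0, 1.0);
//     vsmc::gamma_distribution(rng, r.size(), r.data(), 2.0, 3.0);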
#endif // VSMC_RNG_MKL_HPP