// piranha 0.10 — mp++.hpp
1 /* Copyright 2016-2017 Francesco Biscani (bluescarni@gmail.com)
2 
3 This file is part of the mp++ library.
4 
5 The mp++ library is free software; you can redistribute it and/or modify
6 it under the terms of either:
7 
8  * the GNU Lesser General Public License as published by the Free
9  Software Foundation; either version 3 of the License, or (at your
10  option) any later version.
11 
12 or
13 
14  * the GNU General Public License as published by the Free Software
15  Foundation; either version 3 of the License, or (at your option) any
16  later version.
17 
18 or both in parallel, as here.
19 
20 The mp++ library is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
22 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
23 for more details.
24 
25 You should have received copies of the GNU General Public License and the
26 GNU Lesser General Public License along with the mp++ library. If not,
27 see https://www.gnu.org/licenses/. */
28 
29 #ifndef MPPP_MPPP_HPP
30 #define MPPP_MPPP_HPP
31 
32 #include <algorithm>
33 #include <array>
34 #include <cassert>
35 #include <cmath>
36 #include <cstddef>
37 #include <cstdint>
38 #include <cstdlib>
39 #include <functional>
40 #include <gmp.h>
41 #include <initializer_list>
42 #include <iostream>
43 #include <limits>
44 #include <new>
45 #include <stdexcept>
46 #include <string>
47 #include <type_traits>
48 #include <typeinfo>
49 #include <utility>
50 #include <vector>
51 
52 #if __GNU_MP_VERSION < 5
53 
54 #error Minimum supported GMP version is 5.
55 
56 #endif
57 
58 #if defined(MPPP_WITH_LONG_DOUBLE)
59 
60 #include <mpfr.h>
61 
62 #if MPFR_VERSION_MAJOR < 3
63 
64 #error Minimum supported MPFR version is 3.
65 
66 #endif
67 
68 #endif
69 
70 // Compiler configuration.
71 #if defined(__clang__) || defined(__GNUC__) || defined(__INTEL_COMPILER)
72 
73 #define mppp_likely(x) __builtin_expect(!!(x), 1)
74 #define mppp_unlikely(x) __builtin_expect(!!(x), 0)
75 #define MPPP_RESTRICT __restrict
76 
77 // NOTE: we can check int128 on GCC/clang with __SIZEOF_INT128__ apparently:
78 // http://stackoverflow.com/questions/21886985/what-gcc-versions-support-the-int128-intrinsic-type
79 #if defined(__SIZEOF_INT128__)
80 #define MPPP_UINT128 __uint128_t
81 #endif
82 
83 #elif defined(_MSC_VER)
84 
85 // Disable clang-format here, as the include order seems to matter.
86 // clang-format off
87 #include <windows.h>
88 #include <Winnt.h>
89 // clang-format on
90 
91 #define mppp_likely(x) (x)
92 #define mppp_unlikely(x) (x)
93 #define MPPP_RESTRICT __restrict
94 
95 // Disable some warnings for MSVC.
96 #pragma warning(push)
97 #pragma warning(disable : 4127)
98 #pragma warning(disable : 4146)
99 #pragma warning(disable : 4804)
100 
101 // Checked iterators functionality.
102 #include <iterator>
103 
104 #else
105 
106 #define mppp_likely(x) (x)
107 #define mppp_unlikely(x) (x)
108 #define MPPP_RESTRICT
109 
110 #endif
111 
112 // Configuration of the thread_local keyword.
113 #if defined(__apple_build_version__) || defined(__MINGW32__) || defined(__INTEL_COMPILER)
114 
115 // - Apple clang does not support the thread_local keyword until very recent versions.
116 // - Testing shows that at least some MinGW versions have buggy thread_local implementations.
117 // - Also on Intel the thread_local keyword looks buggy.
118 #define MPPP_MAYBE_TLS
119 
120 #else
121 
122 // For the rest, we assume thread_local is available.
123 #define MPPP_HAVE_THREAD_LOCAL
124 #define MPPP_MAYBE_TLS static thread_local
125 
126 #endif
127 
128 // Namespace setup.
129 #if defined(MPPP_CUSTOM_NAMESPACE)
130 
131 #define MPPP_NAMESPACE MPPP_CUSTOM_NAMESPACE
132 
133 #else
134 
135 #define MPPP_NAMESPACE mppp
136 
137 #endif
138 
140 namespace MPPP_NAMESPACE
141 {
142 
143 namespace mppp_impl
144 {
145 
146 // A bunch of useful utilities from C++14/C++17.
147 
148 // http://en.cppreference.com/w/cpp/types/void_t
// C++11 emulation of std::void_t (C++17). The indirection through a class
// template works around CWG issue 1558 on older compilers, where unused
// parameters of an alias template could otherwise be ignored.
// http://en.cppreference.com/w/cpp/types/void_t
template <typename... Ts>
struct make_void {
    using type = void;
};

template <typename... Ts>
using void_t = typename make_void<Ts...>::type;
156 
// http://en.cppreference.com/w/cpp/experimental/is_detected
// Primary template: selected when Op<Args...> is ill-formed. Detection
// fails and the Default type is reported.
template <class Default, class AlwaysVoid, template <class...> class Op, class... Args>
struct detector {
    using value_t = std::false_type;
    using type = Default;
};

// Partial specialization: selected (via void_t SFINAE on the second
// parameter) when Op<Args...> is well-formed. Detection succeeds and the
// detected type is reported.
template <class Default, template <class...> class Op, class... Args>
struct detector<Default, void_t<Op<Args...>>, Op, Args...> {
    using value_t = std::true_type;
    using type = Op<Args...>;
};
169 
// http://en.cppreference.com/w/cpp/experimental/nonesuch
// Placeholder type used as the default of failed detection: it cannot be
// constructed, destroyed, copied or assigned.
struct nonesuch {
    nonesuch() = delete;
    ~nonesuch() = delete;
    nonesuch(nonesuch const &) = delete;
    void operator=(nonesuch const &) = delete;
};

// std::true_type iff Op<Args...> is well-formed, std::false_type otherwise.
template <template <class...> class Op, class... Args>
using is_detected = typename detector<nonesuch, void, Op, Args...>::value_t;

// Op<Args...> if well-formed, nonesuch otherwise.
template <template <class...> class Op, class... Args>
using detected_t = typename detector<nonesuch, void, Op, Args...>::type;
183 
// C++11 implementation of std::conjunction (C++17):
// http://en.cppreference.com/w/cpp/types/conjunction
// An empty pack yields true; otherwise evaluation short-circuits on the
// first trait whose value is false (later traits are not instantiated).
template <class...>
struct conjunction : std::true_type {
};

template <class B1>
struct conjunction<B1> : B1 {
};

template <class B1, class... Bn>
struct conjunction<B1, Bn...> : std::conditional<bool(B1::value), conjunction<Bn...>, B1>::type {
};
196 
// C++11 implementation of std::disjunction (C++17):
// http://en.cppreference.com/w/cpp/types/disjunction
// An empty pack yields false; otherwise evaluation short-circuits on the
// first trait whose value is true (later traits are not instantiated).
template <class...>
struct disjunction : std::false_type {
};

template <class B1>
struct disjunction<B1> : B1 {
};

template <class B1, class... Bn>
struct disjunction<B1, Bn...> : std::conditional<bool(B1::value), B1, disjunction<Bn...>>::type {
};
209 
// C++11 implementation of std::negation (C++17):
// http://en.cppreference.com/w/cpp/types/negation
template <class B>
struct negation : std::integral_constant<bool, !bool(B::value)> {
};
214 
// mpz_t is an array of some struct.
using mpz_struct_t = std::remove_extent<::mpz_t>::type;
// Integral types used for allocation size and number of limbs.
// NOTE: these are extracted via decltype from the mpz struct itself, so
// they stay correct across GMP versions/configurations.
using mpz_alloc_t = decltype(std::declval<mpz_struct_t>()._mp_alloc);
using mpz_size_t = decltype(std::declval<mpz_struct_t>()._mp_size);
220 
// Some misc tests to check that the mpz struct conforms to our expectations.
// This is crucial for the implementation of the union integer type.
// Expected layout of the mpz struct: alloc counter first, then the signed
// size, then the pointer to the limb data.
struct expected_mpz_struct_t {
    mpz_alloc_t _mp_alloc;
    mpz_size_t _mp_size;
    ::mp_limb_t *_mp_d;
};

// Verify at compile time that mpz_struct_t matches the expected layout
// (size, member offsets, member types) and that the auxiliary GMP types
// behave as the rest of this file assumes.
static_assert(sizeof(expected_mpz_struct_t) == sizeof(mpz_struct_t) && std::is_standard_layout<mpz_struct_t>::value
                  && std::is_standard_layout<expected_mpz_struct_t>::value && offsetof(mpz_struct_t, _mp_alloc) == 0u
                  && offsetof(mpz_struct_t, _mp_size) == offsetof(expected_mpz_struct_t, _mp_size)
                  && offsetof(mpz_struct_t, _mp_d) == offsetof(expected_mpz_struct_t, _mp_d)
                  && std::is_same<::mp_limb_t *, decltype(std::declval<mpz_struct_t>()._mp_d)>::value &&
              // mp_bitcnt_t is used in shift operators, so we double-check it is unsigned. If it is not
              // we might end up shifting by negative values, which is UB.
              std::is_unsigned<::mp_bitcnt_t>::value &&
              // The mpn functions accept sizes as ::mp_size_t, but we generally represent sizes as mpz_size_t.
              // We need then to make sure we can always cast safely mpz_size_t to ::mp_size_t. Inspection
              // of the gmp.h header seems to indicate that mpz_size_t is never larger than ::mp_size_t.
              std::numeric_limits<mpz_size_t>::min() >= std::numeric_limits<::mp_size_t>::min()
                  && std::numeric_limits<mpz_size_t>::max() <= std::numeric_limits<::mp_size_t>::max(),
              "Invalid mpz_t struct layout and/or GMP types.");
243 
244 // Helper function to init an mpz to zero with nlimbs preallocated limbs.
245 inline void mpz_init_nlimbs(mpz_struct_t &rop, std::size_t nlimbs)
246 {
247  // A bit of horrid overflow checking.
248  if (mppp_unlikely(nlimbs > std::numeric_limits<std::size_t>::max() / unsigned(GMP_NUMB_BITS))) {
249  // NOTE: here we are doing what GMP does in case of memory allocation errors. It does not make much sense
250  // to do anything else, as long as GMP does not provide error recovery.
251  std::abort();
252  }
253  const auto nbits = static_cast<std::size_t>(unsigned(GMP_NUMB_BITS) * nlimbs);
254  if (mppp_unlikely(nbits > std::numeric_limits<::mp_bitcnt_t>::max())) {
255  std::abort();
256  }
257  // NOTE: nbits == 0 is allowed.
258  ::mpz_init2(&rop, static_cast<::mp_bitcnt_t>(nbits));
259  assert(std::make_unsigned<mpz_size_t>::type(rop._mp_alloc) >= nlimbs);
260 }
261 
262 // Combined init+set.
263 inline void mpz_init_set_nlimbs(mpz_struct_t &m0, const mpz_struct_t &m1)
264 {
265  mpz_init_nlimbs(m0, ::mpz_size(&m1));
266  ::mpz_set(&m0, &m1);
267 }
268 
// Simple RAII holder for GMP integers: init on construction, clear on
// destruction. Copy and move are disabled, as a shallow copy of the mpz
// struct would result in a double mpz_clear().
struct mpz_raii {
    mpz_raii()
    {
        ::mpz_init(&m_mpz);
        assert(m_mpz._mp_alloc >= 0);
    }
    mpz_raii(const mpz_raii &) = delete;
    mpz_raii(mpz_raii &&) = delete;
    mpz_raii &operator=(const mpz_raii &) = delete;
    mpz_raii &operator=(mpz_raii &&) = delete;
    ~mpz_raii()
    {
        // NOTE: even in recent GMP versions, with lazy allocation,
        // it seems like the pointer always points to something:
        // https://gmplib.org/repo/gmp/file/835f8974ff6e/mpz/init.c
        assert(m_mpz._mp_d != nullptr);
        ::mpz_clear(&m_mpz);
    }
    mpz_struct_t m_mpz;
};
290 
291 #if defined(MPPP_WITH_LONG_DOUBLE)
292 
293 // mpfr_t is an array of some struct.
294 using mpfr_struct_t = std::remove_extent<::mpfr_t>::type;
295 
296 // Simple RAII holder for MPFR floats.
297 struct mpfr_raii {
298  mpfr_raii()
299  {
300  ::mpfr_init2(&m_mpfr, 53);
301  }
302  ~mpfr_raii()
303  {
304  ::mpfr_clear(&m_mpfr);
305  }
306  mpfr_struct_t m_mpfr;
307 };
308 
// A couple of sanity checks when constructing temporary mpfrs from long double.
// NOTE(review): digits10 * 4 is presumably used elsewhere in the file as the
// bit precision of the temporary mpfr — these checks ensure the product does
// not overflow an int and fits into mpfr_prec_t. Confirm against the long
// double conversion code in the full file.
static_assert(std::numeric_limits<long double>::digits10 < std::numeric_limits<int>::max() / 4, "Overflow error.");
static_assert(std::numeric_limits<long double>::digits10 * 4 < std::numeric_limits<::mpfr_prec_t>::max(),
              "Overflow error.");
313 
314 #endif
315 
316 // Convert an mpz to a string in a specific base, to be written into out.
317 inline void mpz_to_str(std::vector<char> &out, const mpz_struct_t *mpz, int base = 10)
318 {
319  assert(base >= 2 && base <= 62);
320  const auto size_base = ::mpz_sizeinbase(mpz, base);
321  if (mppp_unlikely(size_base > std::numeric_limits<std::size_t>::max() - 2u)) {
322  throw std::overflow_error("Too many digits in the conversion of mpz_t to string.");
323  }
324  // Total max size is the size in base plus an optional sign and the null terminator.
325  const auto total_size = size_base + 2u;
326  // NOTE: possible improvement: use a null allocator to avoid initing the chars each time
327  // we resize up.
328  out.resize(static_cast<std::vector<char>::size_type>(total_size));
329  // Overflow check.
330  if (mppp_unlikely(out.size() != total_size)) {
331  throw std::overflow_error("Too many digits in the conversion of mpz_t to string.");
332  }
333  ::mpz_get_str(out.data(), base, mpz);
334 }
335 
336 // Convenience overload for the above.
337 inline std::string mpz_to_str(const mpz_struct_t *mpz, int base = 10)
338 {
339  MPPP_MAYBE_TLS std::vector<char> tmp;
340  mpz_to_str(tmp, mpz, base);
341  return tmp.data();
342 }
343 
// Type trait to check if T is a supported integral type.
// NOTE: the supported types are listed explicitly (rather than using
// std::is_integral) — exactly the standard character and integer types.
template <typename T>
using is_supported_integral
    = std::integral_constant<bool, disjunction<std::is_same<T, bool>, std::is_same<T, char>,
                                               std::is_same<T, signed char>, std::is_same<T, unsigned char>,
                                               std::is_same<T, short>, std::is_same<T, unsigned short>,
                                               std::is_same<T, int>, std::is_same<T, unsigned>, std::is_same<T, long>,
                                               std::is_same<T, unsigned long>, std::is_same<T, long long>,
                                               std::is_same<T, unsigned long long>>::value>;

// Type trait to check if T is a supported floating-point type.
// long double is supported only when MPFR is available (MPPP_WITH_LONG_DOUBLE).
template <typename T>
using is_supported_float = std::integral_constant<bool, disjunction<std::is_same<T, float>, std::is_same<T, double>
#if defined(MPPP_WITH_LONG_DOUBLE)
                                                                    ,
                                                                    std::is_same<T, long double>
#endif
                                                                    >::value>;

// Type trait to check if T is a supported interoperable type, i.e., either
// a supported integral or a supported floating-point type.
template <typename T>
using is_supported_interop
    = std::integral_constant<bool, disjunction<is_supported_integral<T>, is_supported_float<T>>::value>;
366 
367 // Small wrapper to copy limbs.
368 inline void copy_limbs(const ::mp_limb_t *begin, const ::mp_limb_t *end, ::mp_limb_t *out)
369 {
370  for (; begin != end; ++begin, ++out) {
371  *out = *begin;
372  }
373 }
374 
375 // The version with non-overlapping ranges.
376 inline void copy_limbs_no(const ::mp_limb_t *begin, const ::mp_limb_t *end, ::mp_limb_t *MPPP_RESTRICT out)
377 {
378  assert(begin != out);
379  for (; begin != end; ++begin, ++out) {
380  *out = *begin;
381  }
382 }
383 
384 // Add a and b, store the result in res, and return 1 if there's unsigned overflow, 0 othersize.
385 // NOTE: recent GCC versions have builtins for this, but they don't seem to make much of a difference:
386 // https://gcc.gnu.org/onlinedocs/gcc/Integer-Overflow-Builtins.html
387 inline ::mp_limb_t limb_add_overflow(::mp_limb_t a, ::mp_limb_t b, ::mp_limb_t *res)
388 {
389  *res = a + b;
390  return *res < a;
391 }
392 
// The static integer class.
// Statically-allocated counterpart of an mpz: _mp_alloc and _mp_size mirror
// the first two members of the mpz struct, while the limbs live in a
// fixed-size std::array instead of heap storage. _mp_alloc is set to the
// sentinel s_alloc (-1) to mark the storage as static (a real mpz always
// has a non-negative alloc count).
template <std::size_t SSize>
struct static_int {
    // Just a small helper, like C++14.
    template <bool B, typename T = void>
    using enable_if_t = typename std::enable_if<B, T>::type;
    // Let's put a hard cap and sanity check on the static size.
    static_assert(SSize > 0u && SSize <= 64u, "Invalid static size.");
    using limbs_type = std::array<::mp_limb_t, SSize>;
    // Cast it to mpz_size_t for convenience.
    static const mpz_size_t s_size = SSize;
    // Special alloc value to signal static storage in the union.
    static const mpz_alloc_t s_alloc = -1;
    // The largest number of limbs for which special optimisations are activated.
    // This is important because the arithmetic optimisations rely on the unused limbs
    // being zero, thus whenever we use mpn functions on a static int we need to
    // take care of ensuring that this invariant is respected (see dtor_checks() and
    // zero_unused_limbs(), for instance).
    static const std::size_t opt_size = 2;
    // NOTE: init limbs to zero: in some few-limbs optimisations we operate on the whole limb
    // array regardless of the integer size, for performance reasons. If we didn't init to zero,
    // we would read from uninited storage and we would have wrong results as well.
    static_int() : _mp_alloc(s_alloc), _mp_size(0), m_limbs()
    {
    }
    // The defaults here are good.
    static_int(const static_int &) = default;
    static_int(static_int &&) = default;
    static_int &operator=(const static_int &) = default;
    static_int &operator=(static_int &&) = default;
    // Debug-mode invariant checks, run from the destructor below.
    // Returns false on the first violated invariant.
    bool dtor_checks() const
    {
        const auto asize = abs_size();
        // Check the value of the alloc member.
        if (_mp_alloc != s_alloc) {
            return false;
        }
        // Check the asize is not too large.
        if (asize > s_size) {
            return false;
        }
        // Check that all limbs which do not participate in the value are zero, iff the SSize is small enough.
        // For small SSize, we might be using optimisations that require unused limbs to be zero.
        if (SSize <= opt_size) {
            for (auto i = static_cast<std::size_t>(asize); i < SSize; ++i) {
                if (m_limbs[i]) {
                    return false;
                }
            }
        }
        // Check that the highest limb is not zero.
        if (asize > 0 && (m_limbs[static_cast<std::size_t>(asize - 1)] & GMP_NUMB_MASK) == 0u) {
            return false;
        }
        return true;
    }
    ~static_int()
    {
        assert(dtor_checks());
    }
    // Zero the limbs that are not used for representing the value.
    // This is normally not needed, but it is useful when using the GMP mpn api on a static int:
    // the GMP api does not clear unused limbs, but we rely on unused limbs being zero when optimizing operations
    // for few static limbs.
    void zero_unused_limbs()
    {
        // Don't do anything if the static size is larger that the maximum size for which the
        // few-limbs optimisations are activated.
        if (SSize <= opt_size) {
            for (auto i = static_cast<std::size_t>(abs_size()); i < SSize; ++i) {
                m_limbs[i] = 0u;
            }
        }
    }
    // Size in limbs (absolute value of the _mp_size member).
    mpz_size_t abs_size() const
    {
        return _mp_size >= 0 ? _mp_size : -_mp_size;
    }
    // Read-only view of a static_int as an mpz struct: convertible to
    // const mpz_struct_t * so it can be passed to the GMP mpz_ API.
    struct static_mpz_view {
        // NOTE: this is needed when we have the variant view in the integer class: if the active view
        // is the dynamic one, we need to def construct a static view that we will never use.
        // NOTE: m_mpz needs to be zero inited because otherwise when using the move ctor we will
        // be reading from uninited memory.
        // NOTE: use round parentheses here in an attempt to shut a GCC warning that happens with curly
        // braces - they should be equivalent, see:
        // http://en.cppreference.com/w/cpp/language/value_initialization
        static_mpz_view() : m_mpz()
        {
        }
        // NOTE: we use the const_cast to cast away the constness from the pointer to the limbs
        // in n. This is valid as we are never going to use this pointer for writing.
        // NOTE: in recent GMP versions there are functions to accomplish this type of read-only init
        // of an mpz:
        // https://gmplib.org/manual/Integer-Special-Functions.html#Integer-Special-Functions
        explicit static_mpz_view(const static_int &n)
            : m_mpz{s_size, n._mp_size, const_cast<::mp_limb_t *>(n.m_limbs.data())}
        {
        }
        static_mpz_view(static_mpz_view &&) = default;
        static_mpz_view(const static_mpz_view &) = delete;
        static_mpz_view &operator=(const static_mpz_view &) = delete;
        static_mpz_view &operator=(static_mpz_view &&) = delete;
        operator const mpz_struct_t *() const
        {
            return &m_mpz;
        }
        mpz_struct_t m_mpz;
    };
    static_mpz_view get_mpz_view() const
    {
        return static_mpz_view{*this};
    }
    // Data members: the first two match the layout of the beginning of an
    // mpz struct (see the expected_mpz_struct_t checks above).
    mpz_alloc_t _mp_alloc;
    mpz_size_t _mp_size;
    limbs_type m_limbs;
};
510 
// {static_int,mpz} union.
// Storage for the multiprecision integer: either a static_int (small values,
// no heap allocation) or a dynamically-allocated mpz struct. The active
// member is tracked via the _mp_alloc field: a real mpz always has a
// non-negative alloc count, while the static storage uses the sentinel
// s_alloc (-1).
template <std::size_t SSize>
union integer_union {
public:
    using s_storage = static_int<SSize>;
    using d_storage = mpz_struct_t;
    // Def ctor, will init to static.
    integer_union() : m_st()
    {
    }
    // Copy constructor, does a deep copy maintaining the storage class of other.
    integer_union(const integer_union &other)
    {
        if (other.is_static()) {
            ::new (static_cast<void *>(&m_st)) s_storage(other.g_st());
        } else {
            ::new (static_cast<void *>(&m_dy)) d_storage;
            mpz_init_set_nlimbs(m_dy, other.g_dy());
            assert(m_dy._mp_alloc >= 0);
        }
    }
    // Move constructor. Will downgrade other to a static zero integer if other is dynamic.
    integer_union(integer_union &&other) noexcept
    {
        if (other.is_static()) {
            ::new (static_cast<void *>(&m_st)) s_storage(std::move(other.g_st()));
        } else {
            ::new (static_cast<void *>(&m_dy)) d_storage;
            // NOTE: this copies the mpz struct members (shallow copy), thus
            // stealing ownership of other's dynamically-allocated limbs.
            m_dy = other.g_dy();
            // Downgrade the other to an empty static.
            other.g_dy().~d_storage();
            ::new (static_cast<void *>(&other.m_st)) s_storage();
        }
    }
    // Copy assignment operator, performs a deep copy maintaining the storage class.
    integer_union &operator=(const integer_union &other)
    {
        if (mppp_unlikely(this == &other)) {
            return *this;
        }
        const bool s1 = is_static(), s2 = other.is_static();
        if (s1 && s2) {
            g_st() = other.g_st();
        } else if (s1 && !s2) {
            // Destroy static.
            g_st().~s_storage();
            // Construct the dynamic struct.
            ::new (static_cast<void *>(&m_dy)) d_storage;
            // Init + assign the mpz.
            mpz_init_set_nlimbs(m_dy, other.g_dy());
            assert(m_dy._mp_alloc >= 0);
        } else if (!s1 && s2) {
            // Destroy the dynamic this.
            destroy_dynamic();
            // Init-copy the static from other.
            ::new (static_cast<void *>(&m_st)) s_storage(other.g_st());
        } else {
            ::mpz_set(&g_dy(), &other.g_dy());
        }
        return *this;
    }
    // Move assignment, same as above plus possibly steals resources. If this is static
    // and other is dynamic, other is downgraded to a zero static.
    integer_union &operator=(integer_union &&other) noexcept
    {
        if (mppp_unlikely(this == &other)) {
            return *this;
        }
        const bool s1 = is_static(), s2 = other.is_static();
        if (s1 && s2) {
            g_st() = std::move(other.g_st());
        } else if (s1 && !s2) {
            // Destroy static.
            g_st().~s_storage();
            // Construct the dynamic struct.
            ::new (static_cast<void *>(&m_dy)) d_storage;
            // Shallow copy: steal other's dynamically-allocated limbs.
            m_dy = other.g_dy();
            // Downgrade the other to an empty static.
            other.g_dy().~d_storage();
            ::new (static_cast<void *>(&other.m_st)) s_storage();
        } else if (!s1 && s2) {
            // Same as copy assignment: destroy and copy-construct.
            destroy_dynamic();
            ::new (static_cast<void *>(&m_st)) s_storage(other.g_st());
        } else {
            // Swap with other.
            ::mpz_swap(&g_dy(), &other.g_dy());
        }
        return *this;
    }
    ~integer_union()
    {
        if (is_static()) {
            g_st().~s_storage();
        } else {
            destroy_dynamic();
        }
    }
    // Clear the dynamic mpz (freeing its limbs) and end the lifetime of the
    // dynamic member.
    void destroy_dynamic()
    {
        assert(!is_static());
        assert(g_dy()._mp_alloc >= 0);
        assert(g_dy()._mp_d != nullptr);
        ::mpz_clear(&g_dy());
        g_dy().~d_storage();
    }
    // Check storage type.
    // NOTE(review): this reads m_st._mp_alloc even when the dynamic member
    // is active — it relies on the two members sharing the same initial
    // layout (see the expected_mpz_struct_t checks above).
    bool is_static() const
    {
        return m_st._mp_alloc == s_storage::s_alloc;
    }
    bool is_dynamic() const
    {
        return m_st._mp_alloc != s_storage::s_alloc;
    }
    // Getters for st and dy.
    const s_storage &g_st() const
    {
        assert(is_static());
        return m_st;
    }
    s_storage &g_st()
    {
        assert(is_static());
        return m_st;
    }
    const d_storage &g_dy() const
    {
        assert(is_dynamic());
        return m_dy;
    }
    d_storage &g_dy()
    {
        assert(is_dynamic());
        return m_dy;
    }
    // Promotion from static to dynamic. If nlimbs != 0u, allocate nlimbs limbs, otherwise
    // allocate exactly the nlimbs necessary to represent this.
    void promote(std::size_t nlimbs = 0u)
    {
        assert(is_static());
        mpz_struct_t tmp_mpz;
        auto v = g_st().get_mpz_view();
        if (nlimbs == 0u) {
            // If nlimbs is zero, we will allocate exactly the needed
            // number of limbs to represent this.
            mpz_init_set_nlimbs(tmp_mpz, *v);
        } else {
            // Otherwise, we preallocate nlimbs and then set tmp_mpz
            // to the value of this.
            mpz_init_nlimbs(tmp_mpz, nlimbs);
            ::mpz_set(&tmp_mpz, v);
        }
        // Destroy static.
        g_st().~s_storage();
        // Construct the dynamic struct.
        ::new (static_cast<void *>(&m_dy)) d_storage;
        m_dy = tmp_mpz;
    }
    // Demotion from dynamic to static. Returns false (and leaves this
    // unchanged) if the value does not fit into SSize limbs.
    bool demote()
    {
        assert(is_dynamic());
        const auto dyn_size = ::mpz_size(&g_dy());
        // If the dynamic size is greater than the static size, we cannot demote.
        if (dyn_size > SSize) {
            return false;
        }
        // Copy over the limbs to temporary storage.
        std::array<::mp_limb_t, SSize> tmp;
        copy_limbs_no(g_dy()._mp_d, g_dy()._mp_d + dyn_size, tmp.data());
        const auto signed_size = g_dy()._mp_size;
        // Destroy the dynamic storage.
        destroy_dynamic();
        // Init the static storage and copy over the data.
        // NOTE: here the ctor makes sure the static limbs are zeroed
        // out and we don't get stray limbs when copying below.
        ::new (static_cast<void *>(&m_st)) s_storage();
        g_st()._mp_size = signed_size;
        copy_limbs_no(tmp.data(), tmp.data() + dyn_size, g_st().m_limbs.data());
        return true;
    }
    // NOTE: keep these public as we need them below.
    s_storage m_st;
    d_storage m_dy;
};
698 }
699 
701 
// Exception used to signal a division by zero; inherits the constructors
// of std::domain_error (message-taking ctors).
struct zero_division_error final : std::domain_error {
    using std::domain_error::domain_error;
};
708 
709 // NOTE: a few misc things:
710 // - re-visit at one point the issue of the estimators when we need to promote from static to dynamic
711 // in arithmetic ops. Currently they are not 100% optimal since they rely on the information coming out
712 // of the static implementation rather than computing the estimated size of rop themselves, for performance
713 // reasons (see comments);
714 // - maybe the lshift/rshift optimisation for 2 limbs could use dlimb types if available?
715 // - it seems like it might be possible to re-implement a bare-bone mpz api on top of the mpn primitives,
716 // with the goal of providing some form of error recovery in case of memory errors (e.g., throw instead
717 // of aborting). This is probably not an immediate concern though.
718 // - pow() can probably benefit for some specialised static implementation, especially in conjunction with
719 // mpn_sqr().
720 // - gcd() can be improved (see notes).
721 // - the pattern we follow when no mpn primitives are available is to use a static thread local instance of mpz_t
722 // to perform the computation with mpz_ functions and then assign the result to rop. We could probably benefit
723 // from a true assignment operator from mpz_t instead of cting an integer from mpz_t and assigning it (which
724 // is what we are doing now).
726 
868 template <std::size_t SSize>
870 {
871  // Just a small helper, like C++14.
872  template <bool B, typename T = void>
873  using enable_if_t = typename std::enable_if<B, T>::type;
874  // Import these typedefs for ease of use.
875  using s_storage = mppp_impl::static_int<SSize>;
876  using d_storage = mppp_impl::mpz_struct_t;
877  using mpz_size_t = mppp_impl::mpz_size_t;
878  using mpz_raii = mppp_impl::mpz_raii;
879  using mpz_struct_t = mppp_impl::mpz_struct_t;
880 #if defined(MPPP_WITH_LONG_DOUBLE)
881  using mpfr_raii = mppp_impl::mpfr_raii;
882 #endif
883  template <typename T>
884  using is_supported_integral = mppp_impl::is_supported_integral<T>;
885  template <typename T>
886  using is_supported_float = mppp_impl::is_supported_float<T>;
887  template <typename T>
888  using is_supported_interop = mppp_impl::is_supported_interop<T>;
889  template <template <class...> class Op, class... Args>
890  using is_detected = mppp_impl::is_detected<Op, Args...>;
891  template <typename... Args>
892  using conjunction = mppp_impl::conjunction<Args...>;
893  template <typename... Args>
894  using disjunction = mppp_impl::disjunction<Args...>;
895  template <typename T>
896  using negation = mppp_impl::negation<T>;
897  // The underlying static int.
898  using s_int = s_storage;
899  // mpz view class.
900  struct mpz_view {
901  using static_mpz_view = typename s_int::static_mpz_view;
902  explicit mpz_view(const mp_integer &n)
903  : m_static_view(n.is_static() ? n.m_int.g_st().get_mpz_view() : static_mpz_view{}),
904  m_ptr(n.is_static() ? m_static_view : &(n.m_int.g_dy()))
905  {
906  }
907  mpz_view(const mpz_view &) = delete;
908  mpz_view(mpz_view &&other)
909  : m_static_view(std::move(other.m_static_view)),
910  // NOTE: we need to re-init ptr here if other is a static view, because in that case
911  // other.m_ptr will be pointing to data in other and we want it to point to data
912  // in this now.
913  m_ptr((other.m_ptr == other.m_static_view) ? m_static_view : other.m_ptr)
914  {
915  }
916  mpz_view &operator=(const mpz_view &) = delete;
917  mpz_view &operator=(mpz_view &&) = delete;
918  operator const mpz_struct_t *() const
919  {
920  return get();
921  }
922  const mpz_struct_t *get() const
923  {
924  return m_ptr;
925  }
926  static_mpz_view m_static_view;
927  const mpz_struct_t *m_ptr;
928  };
929  // Machinery for the determination of the result of a binary operation.
930  // NOTE: this metaprogramming could be done more cleanly using expr. SFINAE
931  // on the internal dispatching functions, but this generates an error in MSVC about
932  // the operator being defined twice. My impression is that this is an MSVC problem at the
933  // intersection between SFINAE and friend function templates (GCC and clang work fine).
934  // We adopt this construction as a workaround.
935  template <typename, typename, typename = void>
936  struct common_type {
937  };
938  template <typename T>
939  struct common_type<T, T, enable_if_t<std::is_same<T, mp_integer>::value>> {
940  using type = mp_integer;
941  };
942  template <typename T, typename U>
943  struct common_type<T, U, enable_if_t<conjunction<std::is_same<T, mp_integer>, is_supported_integral<U>>::value>> {
944  using type = mp_integer;
945  };
946  template <typename T, typename U>
947  struct common_type<T, U, enable_if_t<conjunction<std::is_same<U, mp_integer>, is_supported_integral<T>>::value>> {
948  using type = mp_integer;
949  };
950  template <typename T, typename U>
951  struct common_type<T, U, enable_if_t<conjunction<std::is_same<T, mp_integer>, is_supported_float<U>>::value>> {
952  using type = U;
953  };
954  template <typename T, typename U>
955  struct common_type<T, U, enable_if_t<conjunction<std::is_same<U, mp_integer>, is_supported_float<T>>::value>> {
956  using type = T;
957  };
958  template <typename T, typename U>
959  using common_t = typename common_type<T, U>::type;
960  // Enabler for in-place arithmetic ops.
961  template <typename T>
962  using in_place_enabler = enable_if_t<disjunction<is_supported_interop<T>, std::is_same<T, mp_integer>>::value, int>;
963 #if defined(_MSC_VER)
964  // Common metaprogramming for bit shifting operators.
965  // NOTE: here and elsewhere we special case MSVC because we need to alter the SFINAE style, as the usual
966  // approach (i.e., default template int argument) results in ICEs in MSVC. Instead, on MSCV we use SFINAE
967  // on the return type.
968  template <typename T>
969  using shift_op_enabler = enable_if_t<is_supported_integral<T>::value, mp_integer>;
970 #else
971  template <typename T>
972  using shift_op_enabler = enable_if_t<is_supported_integral<T>::value, int>;
973 #endif
974  template <typename T, enable_if_t<std::is_unsigned<T>::value, int> = 0>
975  static ::mp_bitcnt_t cast_to_bitcnt(T n)
976  {
977  if (mppp_unlikely(n > std::numeric_limits<::mp_bitcnt_t>::max())) {
978  throw std::domain_error("Cannot bit shift by " + std::to_string(n) + ": the value is too large");
979  }
980  return static_cast<::mp_bitcnt_t>(n);
981  }
982  template <typename T, enable_if_t<std::is_signed<T>::value, int> = 0>
983  static ::mp_bitcnt_t cast_to_bitcnt(T n)
984  {
985  if (mppp_unlikely(n < T(0))) {
986  throw std::domain_error("Cannot bit shift by " + std::to_string(n) + ": negative values are not supported");
987  }
988  if (mppp_unlikely(static_cast<typename std::make_unsigned<T>::type>(n)
989  > std::numeric_limits<::mp_bitcnt_t>::max())) {
990  throw std::domain_error("Cannot bit shift by " + std::to_string(n) + ": the value is too large");
991  }
992  return static_cast<::mp_bitcnt_t>(n);
993  }
994 
995 public:
997  static constexpr std::size_t ssize = SSize;
999 
1002  mp_integer() = default;
1004 
1009  mp_integer(const mp_integer &other) = default;
1011 
1017  mp_integer(mp_integer &&other) = default;
1018 
private:
    // Enabler for generic ctor: restricts T to the supported interoperable types.
    template <typename T>
    using generic_ctor_enabler = enable_if_t<is_supported_interop<T>::value, int>;
    // Enabler for generic assignment (same constraints as the generic ctor).
    template <typename T>
    using generic_assignment_enabler = generic_ctor_enabler<T>;
    // Add a limb at the top of the integer (that is, the new limb will be the new most
    // significant limb). Requires a non-negative integer. This is used only during generic construction,
    // do **NOT** use it anywhere else.
    void push_limb(::mp_limb_t l)
    {
        // NOTE: _mp_size is part of the common initial sequence, readable for both storages.
        const auto asize = m_int.m_st._mp_size;
        assert(asize >= 0);
        if (is_static()) {
            if (std::size_t(asize) < SSize) {
                // If there's space for an extra limb, write it, update the size,
                // and return.
                ++m_int.g_st()._mp_size;
                m_int.g_st().m_limbs[std::size_t(asize)] = l;
                return;
            }
            // Store the upper limb.
            const auto limb_copy = m_int.g_st().m_limbs[SSize - 1u];
            // Make sure the upper limb contains something.
            // NOTE: This is necessary because this method is used while building an integer during generic
            // construction, and it might be possible that the current top limb is zero. If that is the case,
            // the promotion below triggers an assertion failure when destroying
            // the static int in debug mode.
            m_int.g_st().m_limbs[SSize - 1u] = 1u;
            // There's no space for the extra limb. Promote the integer and move on.
            m_int.promote(SSize + 1u);
            // Recover the upper limb.
            m_int.g_dy()._mp_d[SSize - 1u] = limb_copy;
            // NOTE: deliberately no return here - after the promotion we fall through
            // to the dynamic code path below to write the new limb.
        }
        if (m_int.g_dy()._mp_alloc == asize) {
            // There is not enough space for the extra limb, we need to reallocate.
            // NOTE: same as above, make sure the top limb contains something. mpz_realloc2() seems not to care,
            // but better safe than sorry.
            // NOTE: in practice this is never hit on current architectures: the only case we end up here is initing
            // with a 64bit integer when the limb is 32bit, but in that case we already promoted to size 2 above.
            const auto limb_copy = m_int.g_dy()._mp_d[asize - 1];
            m_int.g_dy()._mp_d[asize - 1] = 1u;
            ::mpz_realloc2(&m_int.g_dy(), static_cast<::mp_bitcnt_t>(asize) * 2u * GMP_NUMB_BITS);
            if (mppp_unlikely(m_int.g_dy()._mp_alloc / 2 != asize)) {
                // This means that there was some overflow in the determination of the bit size above.
                std::abort();
            }
            m_int.g_dy()._mp_d[asize - 1] = limb_copy;
        }
        // Write the extra limb, update the size.
        ++m_int.g_dy()._mp_size;
        m_int.g_dy()._mp_d[asize] = l;
    }
    // This is a small utility function to shift down the unsigned integer n by GMP_NUMB_BITS.
    // If GMP_NUMB_BITS is not smaller than the bit size of T, then an assertion will fire. We need this
    // little helper in order to avoid compiler warnings (a shift by >= the width of T would be flagged,
    // even on the branch that is never taken).
    template <typename T, enable_if_t<(GMP_NUMB_BITS < unsigned(std::numeric_limits<T>::digits)), int> = 0>
    static void checked_rshift(T &n)
    {
        n >>= GMP_NUMB_BITS;
    }
    // Overload selected when the shift would be invalid: must never actually be executed.
    template <typename T, enable_if_t<(GMP_NUMB_BITS >= unsigned(std::numeric_limits<T>::digits)), int> = 0>
    static void checked_rshift(T &)
    {
        assert(false);
    }
1086  // Dispatch for generic constructor.
1087  template <typename T, enable_if_t<conjunction<std::is_integral<T>, std::is_unsigned<T>>::value, int> = 0>
1088  void dispatch_generic_ctor(T n)
1089  {
1090  auto nu = static_cast<unsigned long long>(n);
1091  while (nu) {
1092  push_limb(static_cast<::mp_limb_t>(nu & GMP_NUMB_MASK));
1093  if (GMP_NUMB_BITS >= unsigned(std::numeric_limits<unsigned long long>::digits)) {
1094  break;
1095  }
1096  checked_rshift(nu);
1097  }
1098  }
1099  template <typename T, enable_if_t<conjunction<std::is_integral<T>, std::is_signed<T>>::value, int> = 0>
1100  void dispatch_generic_ctor(T n)
1101  {
1102  // NOTE: here we are using cast + unary minus to extract the abs value of n as an unsigned long long. See:
1103  // http://stackoverflow.com/questions/4536095/unary-minus-and-signed-to-unsigned-conversion
1104  // When operating on a negative value, this technique is not 100% portable: it requires an implementation
1105  // of signed integers such that the absolute value of the minimum (negative) value is not greater than
1106  // the maximum value of the unsigned counterpart. This is guaranteed on all computer architectures in use today,
1107  // but in theory there could be architectures where the assumption is not respected. See for instance the
1108  // discussion here:
1109  // http://stackoverflow.com/questions/11372044/unsigned-vs-signed-range-guarantees
1110  // Note that in any case we never run into UB, the only consequence is that for very large negative values
1111  // we could init the integer with the wrong value. We should be able to test for this in the unit tests.
1112  // Let's keep this in mind in the remote case this ever becomes a problem. In such case, we would probably need
1113  // to specialise the implementation for negative values to use bit shifting and modulo operations.
1114  const auto nu = static_cast<unsigned long long>(n);
1115  if (n >= T(0)) {
1116  dispatch_generic_ctor(nu);
1117  } else {
1118  dispatch_generic_ctor(-nu);
1119  neg();
1120  }
1121  }
    // Dispatch for generic constructor: float and double. The (finite) value is converted
    // via mpz_set_d() on a scratch mpz, then copied in via the mpz ctor path.
    template <typename T, enable_if_t<disjunction<std::is_same<T, float>, std::is_same<T, double>>::value, int> = 0>
    void dispatch_generic_ctor(T x)
    {
        if (mppp_unlikely(!std::isfinite(x))) {
            throw std::domain_error("Cannot init integer from the non-finite floating-point value "
                                    + std::to_string(x));
        }
        // Scratch mpz reused across calls (MPPP_MAYBE_TLS presumably expands to
        // thread_local when available - see the macro definition).
        MPPP_MAYBE_TLS mpz_raii tmp;
        ::mpz_set_d(&tmp.m_mpz, static_cast<double>(x));
        dispatch_mpz_ctor(&tmp.m_mpz);
    }
#if defined(MPPP_WITH_LONG_DOUBLE)
    // Dispatch for generic constructor: long double. Goes through an MPFR intermediate,
    // extracting the integral part with truncation (MPFR_RNDZ).
    template <typename T, enable_if_t<std::is_same<T, long double>::value, int> = 0>
    void dispatch_generic_ctor(T x)
    {
        if (mppp_unlikely(!std::isfinite(x))) {
            throw std::domain_error("Cannot init integer from the non-finite floating-point value "
                                    + std::to_string(x));
        }
        MPPP_MAYBE_TLS mpfr_raii mpfr;
        MPPP_MAYBE_TLS mpz_raii tmp;
        // NOTE: static checks for overflows are done above.
        // Working precision in bits, sized from the decimal digits of long double.
        constexpr int d2 = std::numeric_limits<long double>::digits10 * 4;
        ::mpfr_set_prec(&mpfr.m_mpfr, static_cast<::mpfr_prec_t>(d2));
        ::mpfr_set_ld(&mpfr.m_mpfr, x, MPFR_RNDN);
        ::mpfr_get_z(&tmp.m_mpz, &mpfr.m_mpfr, MPFR_RNDZ);
        dispatch_mpz_ctor(&tmp.m_mpz);
    }
#endif
    // Ctor from mpz_t: copy the value of n into this, promoting to dynamic storage
    // when the number of limbs exceeds the static size.
    void dispatch_mpz_ctor(const ::mpz_t n)
    {
        const auto asize = ::mpz_size(n);
        if (asize > SSize) {
            // Too big, need to promote.
            // Destroy static.
            m_int.g_st().~s_storage();
            // Init dynamic (placement new into the union member).
            ::new (static_cast<void *>(&m_int.m_dy)) d_storage;
            mppp_impl::mpz_init_set_nlimbs(m_int.m_dy, *n);
        } else {
            // All this is noexcept.
            // NOTE: m_st() inits to zero the whole array of static limbs, and we copy
            // in only the used limbs.
            m_int.g_st()._mp_size = n->_mp_size;
            mppp_impl::copy_limbs_no(n->_mp_d, n->_mp_d + asize, m_int.g_st().m_limbs.data());
        }
    }
1170 
public:
    /// Generic constructor from the supported interoperable types (integral and floating-point).
    ///
    /// @param x the value used for initialisation.
    ///
    /// @throws std::domain_error if \p x is a non-finite floating-point value.
    template <typename T, generic_ctor_enabler<T> = 0>
    explicit mp_integer(T x) : m_int()
    {
        dispatch_generic_ctor(x);
    }
1191 
1205  explicit mp_integer(const char *s, int base = 10) : m_int()
1206  {
1207  if (mppp_unlikely(base != 0 && (base < 2 || base > 62))) {
1208  throw std::invalid_argument(
1209  "In the constructor from string, a base of " + std::to_string(base)
1210  + " was specified, but the only valid values are 0 and any value in the [2,62] range");
1211  }
1212  MPPP_MAYBE_TLS mpz_raii mpz;
1213  if (mppp_unlikely(::mpz_set_str(&mpz.m_mpz, s, base))) {
1214  if (base) {
1215  throw std::invalid_argument(std::string("The string '") + s + "' is not a valid integer in base "
1216  + std::to_string(base));
1217  } else {
1218  throw std::invalid_argument(std::string("The string '") + s
1219  + "' is not a valid integer any supported base");
1220  }
1221  }
1222  dispatch_mpz_ctor(&mpz.m_mpz);
1223  }
1225 
    /// Constructor from std::string and base: delegates to the constructor from C string.
    ///
    /// @throws unspecified any exception thrown by the constructor from C string.
    explicit mp_integer(const std::string &s, int base = 10) : mp_integer(s.c_str(), base)
    {
    }
1235 
    /// Constructor from a GMP mpz_t: deep-copies the value of \p n.
    explicit mp_integer(const ::mpz_t n) : m_int()
    {
        dispatch_mpz_ctor(n);
    }
1249 
    /// Defaulted copy assignment operator.
    mp_integer &operator=(const mp_integer &other) = default;
    /// Defaulted move assignment operator.
    mp_integer &operator=(mp_integer &&other) = default;
1268 
1283  template <typename T, generic_assignment_enabler<T> = 0>
1284  mp_integer &operator=(const T &x)
1285  {
1286  return *this = mp_integer{x};
1287  }
1289 
    /// Query whether this is currently held in static (small-value) storage.
    bool is_static() const
    {
        return m_int.is_static();
    }
1297 
    /// Query whether this is currently held in dynamic (heap-allocated) storage.
    bool is_dynamic() const
    {
        return m_int.is_dynamic();
    }
1305 
    /// Output stream operator: prints \p n in base 10 via to_string().
    friend std::ostream &operator<<(std::ostream &os, const mp_integer &n)
    {
        return os << n.to_string();
    }
1321 
    /// Input stream operator: reads one full line from \p is and assigns it to \p n
    /// via the constructor from string.
    ///
    /// NOTE(review): if getline() fails or the line is not a valid integer, the string
    /// constructor throws rather than setting the stream's failbit - confirm this is the
    /// intended contract for stream extraction.
    friend std::istream &operator>>(std::istream &is, mp_integer &n)
    {
        MPPP_MAYBE_TLS std::string tmp_str;
        std::getline(is, tmp_str);
        n = mp_integer{tmp_str};
        return is;
    }
1339 
1350  std::string to_string(int base = 10) const
1351  {
1352  if (mppp_unlikely(base < 2 || base > 62)) {
1353  throw std::invalid_argument("Invalid base for string conversion: the base must be between "
1354  "2 and 62, but a value of "
1355  + std::to_string(base) + " was provided instead");
1356  }
1357  return mppp_impl::mpz_to_str(get_mpz_view(), base);
1358  }
1359  // NOTE: maybe provide a method to access the lower-level str conversion that writes to
1360  // std::vector<char>?
1361 
private:
    // Enabler for the conversion operator: same type set as the generic ctor.
    template <typename T>
    using generic_conversion_enabler = generic_ctor_enabler<T>;
    // Conversion to bool: true iff this is nonzero.
    template <typename T, enable_if_t<std::is_same<bool, T>::value, int> = 0>
    std::pair<bool, T> dispatch_conversion() const
    {
        // The conversion to bool can never fail, hence the first member is always true.
        return {true, m_int.m_st._mp_size != 0};
    }
    // Try to convert this to an unsigned long long. The abs value of this will be considered for the conversion.
    // Requires nonzero this. Returns a (success flag, value) pair; on failure the value is 0.
    std::pair<bool, unsigned long long> convert_to_ull() const
    {
        const auto asize = size();
        assert(asize);
        // Get the pointer to the limbs.
        const ::mp_limb_t *ptr = is_static() ? m_int.g_st().m_limbs.data() : m_int.g_dy()._mp_d;
        // Init retval with the first (least significant) limb.
        auto retval = static_cast<unsigned long long>(ptr[0] & GMP_NUMB_MASK);
        // Add the other limbs, if any.
        unsigned long long shift = GMP_NUMB_BITS;
        constexpr unsigned ull_bits = std::numeric_limits<unsigned long long>::digits;
        for (std::size_t i = 1u; i < asize; ++i, shift += GMP_NUMB_BITS) {
            if (shift >= ull_bits) {
                // We need to shift left the current limb. If the shift is too large, we run into UB
                // and it also means that the value does not fit in unsigned long long (and, by extension,
                // in any other unsigned integral type).
                return {false, 0ull};
            }
            const auto l = static_cast<unsigned long long>(ptr[i] & GMP_NUMB_MASK);
            if (l >> (ull_bits - shift)) {
                // Shifting the current limb is well-defined from the point of view of the language, but the result
                // wraps around: the value does not fit in unsigned long long.
                // NOTE: I suspect this branch can be triggered on common architectures only with nail builds.
                return {false, 0ull};
            }
            // This will not overflow, as there is no carry from retval and l << shift is fine.
            retval += l << shift;
        }
        return {true, retval};
    }
    // Conversion to unsigned ints, excluding bool. Returns (false, 0) for negative values
    // and values exceeding the range of T.
    template <typename T,
              enable_if_t<conjunction<std::is_integral<T>, std::is_unsigned<T>, negation<std::is_same<bool, T>>>::value,
                          int> = 0>
    std::pair<bool, T> dispatch_conversion() const
    {
        // Handle zero.
        if (!m_int.m_st._mp_size) {
            return {true, T(0)};
        }
        // Handle negative value: not representable in an unsigned type.
        if (m_int.m_st._mp_size < 0) {
            return {false, T(0)};
        }
        // Attempt conversion to ull.
        const auto candidate = convert_to_ull();
        if (!candidate.first) {
            // The conversion to ull failed.
            return {false, T(0)};
        }
        if (candidate.second > std::numeric_limits<T>::max()) {
            // The conversion to ull succeeded, but the value exceeds the limit of T.
            return {false, T(0)};
        }
        // The conversion to the target unsigned integral type is fine.
        return {true, static_cast<T>(candidate.second)};
    }
    // Conversion to signed ints. Returns (false, 0) if the value is outside the range of T.
    template <typename T, enable_if_t<conjunction<std::is_integral<T>, std::is_signed<T>>::value, int> = 0>
    std::pair<bool, T> dispatch_conversion() const
    {
        // NOTE: here we will assume that unsigned long long can represent the absolute value of any
        // machine integer. This is not necessarily the case on any possible implementation, but it holds
        // true on any current architecture. See the comments below and in the generic constructor regarding
        // the method of computing the absolute value of a negative int.
        using uT = typename std::make_unsigned<T>::type;
        // Handle zero.
        if (!m_int.m_st._mp_size) {
            return {true, T(0)};
        }
        // Attempt conversion to ull (operates on the absolute value).
        const auto candidate = convert_to_ull();
        if (!candidate.first) {
            // The conversion to ull failed: the value is too large to be represented by any integer type.
            return {false, T(0)};
        }
        if (m_int.m_st._mp_size > 0) {
            // If the value is positive, we check that the candidate does not exceed the
            // max value of the type.
            if (candidate.second > uT(std::numeric_limits<T>::max())) {
                return {false, T(0)};
            }
            return {true, static_cast<T>(candidate.second)};
        } else {
            // For negative values, we need to establish if the value fits the negative range of the
            // target type, and we must make sure to take the negative of the candidate correctly.
            // NOTE: see the comments in the generic ctor about the portability of this technique
            // for computing the absolute value.
            constexpr auto min_abs = -static_cast<unsigned long long>(std::numeric_limits<T>::min());
            constexpr auto max = static_cast<unsigned long long>(std::numeric_limits<T>::max());
            if (candidate.second > min_abs) {
                // The value is too small (more negative than T's minimum).
                return {false, T(0)};
            }
            // The abs of min might be greater than max. The idea then is that we decrease the magnitude
            // of the candidate to the safe negation range via division, we negate it and then we recover the
            // original candidate value. If the abs of min is not greater than max, ceil_ratio will be 1, r will be zero
            // and everything will still work.
            constexpr auto ceil_ratio = min_abs / max + unsigned((min_abs % max) != 0u);
            const unsigned long long q = candidate.second / ceil_ratio, r = candidate.second % ceil_ratio;
            // q is within [0, max] by construction, so negating it is safe.
            auto retval = static_cast<T>(-static_cast<T>(q));
            static_assert(ceil_ratio <= uT(std::numeric_limits<T>::max()), "Overflow error.");
            // Scale back up and subtract the remainder to reconstruct -candidate.
            retval = static_cast<T>(retval * static_cast<T>(ceil_ratio));
            retval = static_cast<T>(retval - static_cast<T>(r));
            return {true, retval};
        }
    }
    // Implementation of the conversion to floating-point through GMP/MPFR routines.
    // float/double overload: delegate to mpz_get_d() and narrow.
    template <typename T, enable_if_t<disjunction<std::is_same<T, float>, std::is_same<T, double>>::value, int> = 0>
    static std::pair<bool, T> mpz_float_conversion(const mpz_struct_t &m)
    {
        // This conversion path never reports failure.
        return {true, static_cast<T>(::mpz_get_d(&m))};
    }
#if defined(MPPP_WITH_LONG_DOUBLE)
    // long double overload: go through an MPFR intermediate at a precision sized from
    // the decimal digits of long double (mirrors the generic ctor for long double).
    template <typename T, enable_if_t<std::is_same<T, long double>::value, int> = 0>
    static std::pair<bool, T> mpz_float_conversion(const mpz_struct_t &m)
    {
        MPPP_MAYBE_TLS mpfr_raii mpfr;
        constexpr int d2 = std::numeric_limits<long double>::digits10 * 4;
        ::mpfr_set_prec(&mpfr.m_mpfr, static_cast<::mpfr_prec_t>(d2));
        ::mpfr_set_z(&mpfr.m_mpfr, &m, MPFR_RNDN);
        // This conversion path never reports failure.
        return {true, ::mpfr_get_ld(&mpfr.m_mpfr, MPFR_RNDN)};
    }
#endif
    // Conversion to floating-point. Fast path for single-limb values on IEC 559 (IEEE 754)
    // types, otherwise delegate to the GMP/MPFR routines.
    template <typename T, enable_if_t<std::is_floating_point<T>::value, int> = 0>
    std::pair<bool, T> dispatch_conversion() const
    {
        // Handle zero.
        if (!m_int.m_st._mp_size) {
            return {true, T(0)};
        }
        if (std::numeric_limits<T>::is_iec559) {
            // Optimization for single-limb integers.
            //
            // NOTE: the reasoning here is as follows. If the floating-point type has infinity,
            // then its "range" is the whole real line. The C++ standard guarantees that:
            // """
            // A prvalue of an integer type or of an unscoped enumeration type can be converted to a prvalue of a
            // floating point type. The result is exact if possible. If the value being converted is in the range
            // of values that can be represented but the value cannot be represented exactly,
            // it is an implementation-defined choice of either the next lower or higher representable value. If the
            // value being converted is outside the range of values that can be represented, the behavior is undefined.
            // """
            // This seems to indicate that if the limb value "overflows" the finite range of the floating-point type, we
            // will get either the max/min finite value or +-inf. Additionally, the IEEE standard seems to indicate
            // that an overflowing conversion will produce infinity:
            // http://stackoverflow.com/questions/40694384/integer-to-float-conversions-with-ieee-fp
            //
            // Get the pointer to the limbs.
            const ::mp_limb_t *ptr = is_static() ? m_int.g_st().m_limbs.data() : m_int.g_dy()._mp_d;
            if (m_int.m_st._mp_size == 1) {
                return {true, static_cast<T>(ptr[0] & GMP_NUMB_MASK)};
            }
            if (m_int.m_st._mp_size == -1) {
                return {true, -static_cast<T>(ptr[0] & GMP_NUMB_MASK)};
            }
        }
        // For all the other cases, just delegate to the GMP/MPFR routines.
        return mpz_float_conversion<T>(*static_cast<const mpz_struct_t *>(get_mpz_view()));
    }
1535 
public:
    /// Generic conversion operator to the supported interoperable types.
    ///
    /// @throws std::overflow_error if the value of this cannot be represented by \p T.
    template <typename T, generic_conversion_enabler<T> = 0>
    explicit operator T() const
    {
        const auto retval = dispatch_conversion<T>();
        if (mppp_unlikely(!retval.first)) {
            throw std::overflow_error("Conversion of the integer " + to_string() + " to the type " + typeid(T).name()
                                      + " results in overflow");
        }
        return retval.second;
    }
1560 
    /// Force promotion to dynamic storage.
    ///
    /// @return true if a promotion took place, false if this was already dynamic.
    bool promote()
    {
        if (is_static()) {
            m_int.promote();
            return true;
        }
        return false;
    }
1575 
    /// Attempt demotion to static storage.
    ///
    /// @return false if this is already static; otherwise, the result of the low-level
    /// demotion (as reported by m_int.demote()).
    bool demote()
    {
        if (is_dynamic()) {
            return m_int.demote();
        }
        return false;
    }
1589 
    /// Number of bits needed to represent the absolute value of this (0 for a zero value).
    std::size_t nbits() const
    {
        return m_int.m_st._mp_size ? ::mpz_sizeinbase(get_mpz_view(), 2) : 0u;
    }
1597 
    /// Number of limbs used to represent the absolute value of this.
    std::size_t size() const
    {
        if (is_static()) {
            return std::size_t(m_int.g_st().abs_size());
        }
        return ::mpz_size(&m_int.g_dy());
    }
1608 
1611  int sgn() const
1612  {
1613  // NOTE: size is part of the common initial sequence.
1614  if (m_int.m_st._mp_size != 0) {
1615  return m_int.m_st._mp_size > 0 ? 1 : -1;
1616  } else {
1617  return 0;
1618  }
1619  }
1621 
    /// Free-function sign: returns the sign of \p n (+1, -1 or 0), delegating to the member sgn().
    friend int sgn(const mp_integer &n)
    {
        return n.sgn();
    }
1631 
    /// Obtain a read-only view of this, usable where GMP expects an mpz argument.
    mpz_view get_mpz_view() const
    {
        return mpz_view(*this);
    }
1657 
    {
        // In-place negation, returning *this.
        // NOTE(review): the signature line was lost in extraction - presumably
        // "mp_integer &neg()" given the return statement and the friend overloads below; confirm.
        if (is_static()) {
            // Static storage: flipping the sign of the size member negates the value.
            m_int.g_st()._mp_size = -m_int.g_st()._mp_size;
        } else {
            // Dynamic storage: delegate to the GMP negation routine.
            ::mpz_neg(&m_int.g_dy(), &m_int.g_dy());
        }
        return *this;
    }
1672 
    /// Binary negation: set \p rop to the negative of \p n.
    friend void neg(mp_integer &rop, const mp_integer &n)
    {
        rop = n;
        rop.neg();
    }
1684 
1689  friend mp_integer neg(const mp_integer &n)
1690  {
1691  mp_integer ret(n);
1692  ret.neg();
1693  return ret;
1694  }
1695 
private:
    // Metaprogramming for selecting the algorithm for static addition. The selection happens via
    // an std::integral_constant with 3 possible values:
    // - 0 (default case): use the GMP mpn functions,
    // - 1: selected when there are no nail bits and the static size is 1,
    // - 2: selected when there are no nail bits and the static size is 2.
    // The resulting constant is used for tag dispatching among the static_add_impl() overloads.
    template <typename SInt>
    using static_add_algo = std::integral_constant<int, (!GMP_NAIL_BITS && SInt::s_size == 1)
                                                            ? 1
                                                            : ((!GMP_NAIL_BITS && SInt::s_size == 2) ? 2 : 0)>;
1706  // General implementation via mpn.
1707  // Small helper to compute the size after subtraction via mpn. s is a strictly positive size.
1708  static mpz_size_t sub_compute_size(const ::mp_limb_t *rdata, mpz_size_t s)
1709  {
1710  assert(s > 0);
1711  mpz_size_t cur_idx = s - 1;
1712  for (; cur_idx >= 0; --cur_idx) {
1713  if (rdata[cur_idx] & GMP_NUMB_MASK) {
1714  break;
1715  }
1716  }
1717  return cur_idx + 1;
1718  }
    // NOTE: this function (and its other overloads) will return true in case of success, false in case of failure
    // (i.e., the addition overflows and the static size is not enough).
    // General implementation via the GMP mpn functions (tag 0). The asize/sign arguments are the
    // decomposed absolute sizes and signs of the operands, precomputed by static_addsub().
    static bool static_add_impl(s_int &rop, const s_int &op1, const s_int &op2, mpz_size_t asize1, mpz_size_t asize2,
                                int sign1, int sign2, const std::integral_constant<int, 0> &)
    {
        using mppp_impl::copy_limbs;
        auto rdata = &rop.m_limbs[0];
        auto data1 = &op1.m_limbs[0], data2 = &op2.m_limbs[0];
        const auto size1 = op1._mp_size;
        // NOTE: cannot trust the size member from op2, as op2 could've been negated if
        // we are actually subtracting.
        const auto size2 = (sign2 >= 0) ? asize2 : -asize2;
        // mpn functions require nonzero arguments.
        if (mppp_unlikely(!sign2)) {
            // op2 is zero: the result is op1.
            rop._mp_size = size1;
            copy_limbs(data1, data1 + asize1, rdata);
            return true;
        }
        if (mppp_unlikely(!sign1)) {
            // op1 is zero: the result is op2 (with the effective sign).
            rop._mp_size = size2;
            copy_limbs(data2, data2 + asize2, rdata);
            return true;
        }
        // Check, for op1 and op2, whether:
        // - the asize is the max static size, and, if yes,
        // - the highest bit in the highest limb is set.
        // If this condition holds for op1 and op2, we return failure, as the computation might require
        // an extra limb.
        // NOTE: the reason we check this is that we do not want to run into the situation in which we have written
        // something into rop (via the mpn functions below), only to realize later that the computation overflows.
        // This would be bad because, in case of overlapping arguments, it would destroy one or two operands without
        // possibility of recovering. The alternative would be to do the computation in some local buffer and then
        // copy
        // it out, but that is rather costly. Note that this means that in principle a computation that could fit in
        // static storage ends up triggering a promotion.
        const bool c1 = std::size_t(asize1) == SSize && ((data1[asize1 - 1] & GMP_NUMB_MASK) >> (GMP_NUMB_BITS - 1));
        const bool c2 = std::size_t(asize2) == SSize && ((data2[asize2 - 1] & GMP_NUMB_MASK) >> (GMP_NUMB_BITS - 1));
        if (mppp_unlikely(c1 || c2)) {
            return false;
        }
        if (sign1 == sign2) {
            // Same sign: compute a true addition of the absolute values.
            if (asize1 >= asize2) {
                // The number of limbs of op1 >= op2.
                ::mp_limb_t cy;
                if (asize2 == 1) {
                    // NOTE: we are not masking data2[0] with GMP_NUMB_MASK, I am assuming the mpn function
                    // is able to deal with a limb with a nail.
                    cy = ::mpn_add_1(rdata, data1, static_cast<::mp_size_t>(asize1), data2[0]);
                } else if (asize1 == asize2) {
                    cy = ::mpn_add_n(rdata, data1, data2, static_cast<::mp_size_t>(asize1));
                } else {
                    cy = ::mpn_add(rdata, data1, static_cast<::mp_size_t>(asize1), data2,
                                   static_cast<::mp_size_t>(asize2));
                }
                if (cy) {
                    // Carry out of the top limb: the result gains one limb.
                    assert(asize1 < s_int::s_size);
                    rop._mp_size = size1 + sign1;
                    // NOTE: there should be no need to use GMP_NUMB_MASK here.
                    rdata[asize1] = 1u;
                } else {
                    // Without carry, the size is unchanged.
                    rop._mp_size = size1;
                }
            } else {
                // The number of limbs of op2 > op1.
                ::mp_limb_t cy;
                if (asize1 == 1) {
                    cy = ::mpn_add_1(rdata, data2, static_cast<::mp_size_t>(asize2), data1[0]);
                } else {
                    cy = ::mpn_add(rdata, data2, static_cast<::mp_size_t>(asize2), data1,
                                   static_cast<::mp_size_t>(asize1));
                }
                if (cy) {
                    assert(asize2 < s_int::s_size);
                    rop._mp_size = size2 + sign2;
                    rdata[asize2] = 1u;
                } else {
                    rop._mp_size = size2;
                }
            }
        } else {
            // Opposite signs: compute a subtraction of the absolute values,
            // always subtracting the smaller magnitude from the larger.
            if (asize1 > asize2
                || (asize1 == asize2 && ::mpn_cmp(data1, data2, static_cast<::mp_size_t>(asize1)) >= 0)) {
                // abs(op1) >= abs(op2).
                ::mp_limb_t br;
                if (asize2 == 1) {
                    br = ::mpn_sub_1(rdata, data1, static_cast<::mp_size_t>(asize1), data2[0]);
                } else if (asize1 == asize2) {
                    br = ::mpn_sub_n(rdata, data1, data2, static_cast<::mp_size_t>(asize1));
                } else {
                    br = ::mpn_sub(rdata, data1, static_cast<::mp_size_t>(asize1), data2,
                                   static_cast<::mp_size_t>(asize2));
                }
                // No borrow possible, as we subtracted the smaller magnitude.
                assert(!br);
                rop._mp_size = sub_compute_size(rdata, asize1);
                if (sign1 != 1) {
                    rop._mp_size = -rop._mp_size;
                }
            } else {
                // abs(op2) > abs(op1).
                ::mp_limb_t br;
                if (asize1 == 1) {
                    br = ::mpn_sub_1(rdata, data2, static_cast<::mp_size_t>(asize2), data1[0]);
                } else {
                    br = ::mpn_sub(rdata, data2, static_cast<::mp_size_t>(asize2), data1,
                                   static_cast<::mp_size_t>(asize1));
                }
                assert(!br);
                rop._mp_size = sub_compute_size(rdata, asize2);
                if (sign2 != 1) {
                    rop._mp_size = -rop._mp_size;
                }
            }
        }
        return true;
    }
    // Optimization for single-limb statics with no nails (tag 1).
    // Returns true on success, false if the result needs more than one limb.
    static bool static_add_impl(s_int &rop, const s_int &op1, const s_int &op2, mpz_size_t asize1, mpz_size_t asize2,
                                int sign1, int sign2, const std::integral_constant<int, 1> &)
    {
        using mppp_impl::limb_add_overflow;
        auto rdata = &rop.m_limbs[0];
        auto data1 = &op1.m_limbs[0], data2 = &op2.m_limbs[0];
        // NOTE: both asizes have to be 0 or 1 here.
        assert((asize1 == 1 && data1[0] != 0u) || (asize1 == 0 && data1[0] == 0u));
        assert((asize2 == 1 && data2[0] != 0u) || (asize2 == 0 && data2[0] == 0u));
        ::mp_limb_t tmp;
        if (sign1 == sign2) {
            // When the signs are identical, we can implement addition as a true addition.
            if (mppp_unlikely(limb_add_overflow(data1[0], data2[0], &tmp))) {
                // Overflow out of the single limb: cannot represent statically.
                return false;
            }
            // Assign the output. asize can be zero (sign1 == sign2 == 0) or 1.
            rop._mp_size = sign1;
            rdata[0] = tmp;
        } else {
            // When the signs differ, we need to implement addition as a subtraction.
            // NOTE: this also includes the case in which only one of the operands is zero.
            if (data1[0] >= data2[0]) {
                // op1 is not smaller than op2.
                tmp = data1[0] - data2[0];
                // asize is either 1 or 0 (0 iff abs(op1) == abs(op2)).
                rop._mp_size = sign1;
                if (mppp_unlikely(!tmp)) {
                    rop._mp_size = 0;
                }
                rdata[0] = tmp;
            } else {
                // NOTE: this has to be one, as data2[0] and data1[0] cannot be equal.
                rop._mp_size = sign2;
                rdata[0] = data2[0] - data1[0];
            }
        }
        return true;
    }
1875  // Optimization for two-limbs statics with no nails.
1876  // Small helper to compare two statics of equal asize 1 or 2.
1877  static int compare_limbs_2(const ::mp_limb_t *data1, const ::mp_limb_t *data2, mpz_size_t asize)
1878  {
1879  // NOTE: this requires no nail bits.
1880  assert(!GMP_NAIL_BITS);
1881  assert(asize == 1 || asize == 2);
1882  // Start comparing from the top.
1883  auto cmp_idx = asize - 1;
1884  if (data1[cmp_idx] != data2[cmp_idx]) {
1885  return data1[cmp_idx] > data2[cmp_idx] ? 1 : -1;
1886  }
1887  // The top limbs are equal, move down one limb.
1888  // If we are already at the bottom limb, it means the two numbers are equal.
1889  if (!cmp_idx) {
1890  return 0;
1891  }
1892  --cmp_idx;
1893  if (data1[cmp_idx] != data2[cmp_idx]) {
1894  return data1[cmp_idx] > data2[cmp_idx] ? 1 : -1;
1895  }
1896  return 0;
1897  }
    // Optimization for two-limbs statics with no nails (tag 2).
    // Returns true on success, false if the result needs more than two limbs.
    static bool static_add_impl(s_int &rop, const s_int &op1, const s_int &op2, mpz_size_t asize1, mpz_size_t asize2,
                                int sign1, int sign2, const std::integral_constant<int, 2> &)
    {
        using mppp_impl::limb_add_overflow;
        auto rdata = &rop.m_limbs[0];
        auto data1 = &op1.m_limbs[0], data2 = &op2.m_limbs[0];
        if (sign1 == sign2) {
            // NOTE: this handles the case in which the numbers have the same sign, including 0 + 0.
            //
            // NOTE: this is the implementation for 2 limbs, even if potentially the operands have 1 limb.
            // The idea here is that it's better to do a few computations more rather than paying the branching
            // cost. 1-limb operands will have the upper limb set to zero from the zero-initialization of
            // the limbs of static ints.
            //
            // NOTE: the rop hi limb might spill over either from the addition of the hi limbs
            // of op1 and op2, or by the addition of carry coming over from the addition of
            // the lo limbs of op1 and op2.
            //
            // Add the hi and lo limbs.
            const auto a = data1[0], b = data2[0], c = data1[1], d = data2[1];
            ::mp_limb_t lo, hi1, hi2;
            const ::mp_limb_t cy_lo = limb_add_overflow(a, b, &lo), cy_hi1 = limb_add_overflow(c, d, &hi1),
                              cy_hi2 = limb_add_overflow(hi1, cy_lo, &hi2);
            // The result will overflow if we have any carry relating to the high limb.
            if (mppp_unlikely(cy_hi1 || cy_hi2)) {
                return false;
            }
            // For the size compuation:
            // - if sign1 == 0, size is zero,
            // - if sign1 == +-1, then the size is either +-1 or +-2: asize is 2 if the result
            // has a nonzero 2nd limb, otherwise asize is 1.
            rop._mp_size = sign1;
            if (hi2) {
                rop._mp_size = sign1 + sign1;
            }
            rdata[0] = lo;
            rdata[1] = hi2;
        } else {
            // When the signs differ, we need to implement addition as a subtraction.
            // NOTE: this also includes the case in which only one of the operands is zero.
            if (asize1 > asize2 || (asize1 == asize2 && compare_limbs_2(data1, data2, asize1) >= 0)) {
                // op1 is >= op2 in absolute value.
                const auto lo = data1[0] - data2[0];
                // If there's a borrow, then hi1 > hi2, otherwise we would have a negative result.
                assert(data1[0] >= data2[0] || data1[1] > data2[1]);
                // This can never wrap around, at most it goes to zero.
                const auto hi = data1[1] - data2[1] - static_cast<::mp_limb_t>(data1[0] < data2[0]);
                // asize can be 0, 1 or 2.
                rop._mp_size = 0;
                if (hi) {
                    rop._mp_size = sign1 + sign1;
                } else if (lo) {
                    rop._mp_size = sign1;
                }
                rdata[0] = lo;
                rdata[1] = hi;
            } else {
                // op2 is > op1 in absolute value.
                const auto lo = data2[0] - data1[0];
                assert(data2[0] >= data1[0] || data2[1] > data1[1]);
                const auto hi = data2[1] - data1[1] - static_cast<::mp_limb_t>(data2[0] < data1[0]);
                // asize can be 1 or 2, but not zero as we know abs(op1) != abs(op2).
                rop._mp_size = sign2;
                if (hi) {
                    rop._mp_size = sign2 + sign2;
                }
                rdata[0] = lo;
                rdata[1] = hi;
            }
        }
        return true;
    }
    // Common entry point for static addition (AddOrSub = true) and subtraction (AddOrSub = false).
    // Decomposes the operands into absolute sizes and signs, then tag-dispatches to the
    // appropriate static_add_impl() overload. Returns true on success, false on overflow
    // of the static storage.
    template <bool AddOrSub>
    static bool static_addsub(s_int &rop, const s_int &op1, const s_int &op2)
    {
        // NOTE: effectively negate op2 if we are subtracting.
        mpz_size_t asize1 = op1._mp_size, asize2 = AddOrSub ? op2._mp_size : -op2._mp_size;
        // Signs start as 0 (zero operand) or 1, and are flipped to -1 below for negative operands.
        int sign1 = asize1 != 0, sign2 = asize2 != 0;
        if (asize1 < 0) {
            asize1 = -asize1;
            sign1 = -1;
        }
        if (asize2 < 0) {
            asize2 = -asize2;
            sign2 = -1;
        }
        const bool retval = static_add_impl(rop, op1, op2, asize1, asize2, sign1, sign2, static_add_algo<s_int>{});
        if (static_add_algo<s_int>::value == 0 && retval) {
            // If we used the mpn functions and we actually wrote into rop, then
            // make sure we zero out the unused limbs.
            rop.zero_unused_limbs();
        }
        return retval;
    }
1992  // Dispatching for the binary addition operator.
1993  static mp_integer dispatch_binary_add(const mp_integer &op1, const mp_integer &op2)
1994  {
1995  mp_integer retval;
1996  add(retval, op1, op2);
1997  return retval;
1998  }
1999  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
2000  static mp_integer dispatch_binary_add(const mp_integer &op1, T n)
2001  {
2002  mp_integer retval{n};
2003  add(retval, retval, op1);
2004  return retval;
2005  }
    // Binary operator+ dispatching: supported integral + mp_integer. Addition is
    // commutative, so defer to the mp_integer-on-the-left overload.
    template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
    static mp_integer dispatch_binary_add(T n, const mp_integer &op2)
    {
        return dispatch_binary_add(op2, n);
    }
    // Binary operator+ dispatching: mp_integer + supported floating-point. The
    // computation is carried out (and returned) in the floating-point type T.
    template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
    static T dispatch_binary_add(const mp_integer &op1, T x)
    {
        return static_cast<T>(op1) + x;
    }
    // Binary operator+ dispatching: supported floating-point + mp_integer.
    // Commutative, defer to the mp_integer-on-the-left overload.
    template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
    static T dispatch_binary_add(T x, const mp_integer &op2)
    {
        return dispatch_binary_add(op2, x);
    }
    // Dispatching for in-place add.
    // mp_integer += mp_integer: delegate to the ternary add().
    static void dispatch_in_place_add(mp_integer &retval, const mp_integer &n)
    {
        add(retval, retval, n);
    }
    // mp_integer += supported integral: promote the integral to a temporary
    // mp_integer and delegate to the ternary add().
    template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
    static void dispatch_in_place_add(mp_integer &retval, const T &n)
    {
        add(retval, retval, mp_integer{n});
    }
    // mp_integer += supported floating-point: perform the addition in T, then
    // assign back (the assignment converts the float result to mp_integer).
    template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
    static void dispatch_in_place_add(mp_integer &retval, const T &x)
    {
        retval = static_cast<T>(retval) + x;
    }
2036  // Dispatching for the binary subtraction operator.
2037  static mp_integer dispatch_binary_sub(const mp_integer &op1, const mp_integer &op2)
2038  {
2039  mp_integer retval;
2040  sub(retval, op1, op2);
2041  return retval;
2042  }
2043  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
2044  static mp_integer dispatch_binary_sub(const mp_integer &op1, T n)
2045  {
2046  mp_integer retval{n};
2047  sub(retval, retval, op1);
2048  retval.neg();
2049  return retval;
2050  }
2051  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
2052  static mp_integer dispatch_binary_sub(T n, const mp_integer &op2)
2053  {
2054  auto retval = dispatch_binary_sub(op2, n);
2055  retval.neg();
2056  return retval;
2057  }
    // Binary operator- dispatching: mp_integer - supported floating-point. The
    // computation is carried out (and returned) in the floating-point type T.
    template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
    static T dispatch_binary_sub(const mp_integer &op1, T x)
    {
        return static_cast<T>(op1) - x;
    }
    // Binary operator- dispatching: supported floating-point - mp_integer.
    // Subtraction anti-commutes, hence the negation of the swapped-operand call.
    template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
    static T dispatch_binary_sub(T x, const mp_integer &op2)
    {
        return -dispatch_binary_sub(op2, x);
    }
    // Dispatching for in-place sub.
    // mp_integer -= mp_integer: delegate to the ternary sub().
    static void dispatch_in_place_sub(mp_integer &retval, const mp_integer &n)
    {
        sub(retval, retval, n);
    }
    // mp_integer -= supported integral: promote the integral to a temporary
    // mp_integer and delegate to the ternary sub().
    template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
    static void dispatch_in_place_sub(mp_integer &retval, const T &n)
    {
        sub(retval, retval, mp_integer{n});
    }
    // mp_integer -= supported floating-point: perform the subtraction in T, then
    // assign back (the assignment converts the float result to mp_integer).
    template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
    static void dispatch_in_place_sub(mp_integer &retval, const T &x)
    {
        retval = static_cast<T>(retval) - x;
    }
2083 
2084 public:
2086 
2093  friend void add(mp_integer &rop, const mp_integer &op1, const mp_integer &op2)
2094  {
2095  const bool sr = rop.is_static(), s1 = op1.is_static(), s2 = op2.is_static();
2096  if (mppp_likely(sr && s1 && s2)) {
2097  // Optimise the case of all statics.
2098  if (mppp_likely(static_addsub<true>(rop.m_int.g_st(), op1.m_int.g_st(), op2.m_int.g_st()))) {
2099  return;
2100  }
2101  }
2102  if (sr) {
2103  rop.m_int.promote(SSize + 1u);
2104  }
2105  ::mpz_add(&rop.m_int.g_dy(), op1.get_mpz_view(), op2.get_mpz_view());
2106  }
2108 
2112  {
2113  return *this;
2114  }
2116 
    // Binary addition operator: defined for any (T, U) combination for which
    // common_t<T, U> is a valid type; dispatch selects the concrete overload.
    template <typename T, typename U>
    friend common_t<T, U> operator+(const T &op1, const U &op2)
    {
        return dispatch_binary_add(op1, op2);
    }
2128 
    // In-place addition operator: mp_integer += op, for any supported
    // interoperable type (see in_place_enabler). Returns *this.
    template <typename T, in_place_enabler<T> = 0>
    mp_integer &operator+=(const T &op)
    {
        dispatch_in_place_add(*this, op);
        return *this;
    }
2142 
2143 private:
2144 #if defined(_MSC_VER)
2145  template <typename T>
2146  using in_place_lenabler = enable_if_t<is_supported_interop<T>::value, T &>;
2147 #else
2148  template <typename T>
2149  using in_place_lenabler = enable_if_t<is_supported_interop<T>::value, int>;
2150 #endif
2151 
2152 public:
2153 #if defined(_MSC_VER)
2154  template <typename T>
2155  friend in_place_lenabler<T> operator+=(T &x, const mp_integer &n)
2156 #else
2157 
2175  template <typename T, in_place_lenabler<T> = 0>
2176  friend T &operator+=(T &x, const mp_integer &n)
2177 #endif
2178  {
2179  // NOTE: if x is an integral, then the static cast is a generic conversion from
2180  // mp_integer to the integral, which can fail because of overflow. Otherwise, the
2181  // static cast is a redundant cast to float of x + n, which is already a float.
2182  return x = static_cast<T>(x + n);
2183  }
2185 
2191  {
2192  add_ui(*this, *this, 1u);
2193  return *this;
2194  }
2196 
2202  {
2203  mp_integer retval(*this);
2204  ++(*this);
2205  return retval;
2206  }
2208 
2215  friend void sub(mp_integer &rop, const mp_integer &op1, const mp_integer &op2)
2216  {
2217  const bool sr = rop.is_static(), s1 = op1.is_static(), s2 = op2.is_static();
2218  if (mppp_likely(sr && s1 && s2)) {
2219  // Optimise the case of all statics.
2220  if (mppp_likely(static_addsub<false>(rop.m_int.g_st(), op1.m_int.g_st(), op2.m_int.g_st()))) {
2221  return;
2222  }
2223  }
2224  if (sr) {
2225  rop.m_int.promote(SSize + 1u);
2226  }
2227  ::mpz_sub(&rop.m_int.g_dy(), op1.get_mpz_view(), op2.get_mpz_view());
2228  }
2230 
2234  {
2235  mp_integer retval{*this};
2236  retval.neg();
2237  return retval;
2238  }
2240 
    // Binary subtraction operator: defined for any (T, U) combination for which
    // common_t<T, U> is a valid type; dispatch selects the concrete overload.
    template <typename T, typename U>
    friend common_t<T, U> operator-(const T &op1, const U &op2)
    {
        return dispatch_binary_sub(op1, op2);
    }
2252 
    // In-place subtraction operator: mp_integer -= op, for any supported
    // interoperable type (see in_place_enabler). Returns *this.
    template <typename T, in_place_enabler<T> = 0>
    mp_integer &operator-=(const T &op)
    {
        dispatch_in_place_sub(*this, op);
        return *this;
    }
2266 #if defined(_MSC_VER)
2267  template <typename T>
2268  friend in_place_lenabler<T> operator-=(T &x, const mp_integer &n)
2269 #else
2270 
2288  template <typename T, in_place_lenabler<T> = 0>
2289  friend T &operator-=(T &x, const mp_integer &n)
2290 #endif
2291  {
2292  return x = static_cast<T>(x - n);
2293  }
2295 
2301  {
2302  // NOTE: this should be written in terms of sub_ui(), once implemented.
2303  neg();
2304  add_ui(*this, *this, 1u);
2305  neg();
2306  return *this;
2307  }
2309 
2315  {
2316  mp_integer retval(*this);
2317  --(*this);
2318  return retval;
2319  }
2320 
2321 private:
    // Metaprogramming for selecting the algorithm for static addition with ui. The selection happens via
    // an std::integral_constant with 3 possible values:
    // - 0 (default case): use the GMP mpn functions,
    // - 1: selected when there are no nail bits and the static size is 1,
    // - 2: selected when there are no nail bits and the static size is 2.
    template <typename SInt>
    using static_add_ui_algo = std::integral_constant<int, (!GMP_NAIL_BITS && SInt::s_size == 1)
                                                              ? 1
                                                              : ((!GMP_NAIL_BITS && SInt::s_size == 2) ? 2 : 0)>;
    // mpn implementation.
    // rop = op1 + op2, where op2 is an unsigned long. Returns false when the
    // result might not fit in the static storage, true on success.
    // NOTE: the cast of op2 to ::mp_limb_t below is safe because the caller
    // (add_ui()) takes this path only when unsigned long fits in GMP_NUMB_MASK.
    static bool static_add_ui_impl(s_int &rop, const s_int &op1, mpz_size_t asize1, int sign1, unsigned long op2,
                                   const std::integral_constant<int, 0> &)
    {
        using mppp_impl::copy_limbs;
        auto rdata = &rop.m_limbs[0];
        auto data1 = &op1.m_limbs[0];
        const auto size1 = op1._mp_size;
        const auto l2 = static_cast<::mp_limb_t>(op2);
        // mpn functions require nonzero arguments.
        if (mppp_unlikely(!l2)) {
            // op2 == 0: the result is just op1.
            rop._mp_size = size1;
            copy_limbs(data1, data1 + asize1, rdata);
            return true;
        }
        if (mppp_unlikely(!sign1)) {
            // op1 == 0: the result is op2, which occupies a single limb.
            // NOTE: this has to be 1 because l2 == 0 is handled above.
            rop._mp_size = 1;
            rdata[0] = l2;
            return true;
        }
        // This is the same overflow check as in static_add(): bail out when the
        // top numb bit of the most significant operand limb is set, as the
        // addition below could then carry out of the static storage.
        const bool c1 = std::size_t(asize1) == SSize && ((data1[asize1 - 1] & GMP_NUMB_MASK) >> (GMP_NUMB_BITS - 1));
        const bool c2 = SSize == 1u && (l2 >> (GMP_NUMB_BITS - 1));
        if (mppp_unlikely(c1 || c2)) {
            return false;
        }
        if (sign1 == 1) {
            // op1 and op2 are both strictly positive.
            if (::mpn_add_1(rdata, data1, static_cast<::mp_size_t>(asize1), l2)) {
                // Carry out of the top limb: the overflow check above guarantees
                // there is room for one more limb.
                assert(asize1 < s_int::s_size);
                rop._mp_size = size1 + 1;
                // NOTE: there should be no need to use GMP_NUMB_MASK here.
                rdata[asize1] = 1u;
            } else {
                // Without carry, the size is unchanged.
                rop._mp_size = size1;
            }
        } else {
            // op1 is strictly negative, op2 is strictly positive.
            if (asize1 > 1 || (asize1 == 1 && (data1[0] & GMP_NUMB_MASK) >= l2)) {
                // abs(op1) >= abs(op2).
                const auto br = ::mpn_sub_1(rdata, data1, static_cast<::mp_size_t>(asize1), l2);
                (void)br;
                assert(!br);
                // The asize can be the original one or original - 1 (we subtracted a limb).
                // size1 is negative here, so adding 1 shrinks the absolute size.
                rop._mp_size = size1 + static_cast<mpz_size_t>(!(rdata[asize1 - 1] & GMP_NUMB_MASK));
            } else {
                // abs(op2) > abs(op1).
                const auto br = ::mpn_sub_1(rdata, &l2, 1, data1[0]);
                (void)br;
                assert(!br);
                // The size must be 1, as abs(op2) == abs(op1) is handled above.
                assert((rdata[0] & GMP_NUMB_MASK));
                rop._mp_size = 1;
            }
        }
        return true;
    }
    // 1-limb optimisation (no nails).
    // rop = op1 + op2 with a single static limb; false on overflow.
    static bool static_add_ui_impl(s_int &rop, const s_int &op1, mpz_size_t, int sign1, unsigned long op2,
                                   const std::integral_constant<int, 1> &)
    {
        using mppp_impl::limb_add_overflow;
        const auto l1 = op1.m_limbs[0];
        const auto l2 = static_cast<::mp_limb_t>(op2);
        ::mp_limb_t tmp;
        if (sign1 >= 0) {
            // op1 is zero or positive: true unsigned addition.
            if (mppp_unlikely(limb_add_overflow(l1, l2, &tmp))) {
                return false;
            }
            // Size is 1 unless the sum is zero.
            rop._mp_size = (tmp != 0u);
            rop.m_limbs[0] = tmp;
        } else {
            // op1 is negative, op2 is non-negative. Implement as a sub.
            if (l1 >= l2) {
                // op1 has larger or equal abs. The result will be non-positive.
                tmp = l1 - l2;
                // size is -1 or 0, 0 iff l1 == l2.
                rop._mp_size = -static_cast<mpz_size_t>(tmp != 0u);
                rop.m_limbs[0] = tmp;
            } else {
                // op1 has smaller abs. The result will be positive.
                rop._mp_size = 1;
                rop.m_limbs[0] = l2 - l1;
            }
        }
        return true;
    }
    // 2-limb optimisation (no nails).
    // rop = op1 + op2 with two static limbs; false on overflow.
    static bool static_add_ui_impl(s_int &rop, const s_int &op1, mpz_size_t asize1, int sign1, unsigned long op2,
                                   const std::integral_constant<int, 2> &)
    {
        using mppp_impl::limb_add_overflow;
        auto rdata = &rop.m_limbs[0];
        auto data1 = &op1.m_limbs[0];
        const auto l2 = static_cast<::mp_limb_t>(op2);
        if (sign1 >= 0) {
            // True unsigned addition.
            // These two limbs will contain the result.
            ::mp_limb_t lo, hi;
            // Add l2 to the low limb, placing the result in lo.
            const ::mp_limb_t cy_lo = limb_add_overflow(data1[0], l2, &lo);
            // Add the carry from the low addition to the high limb, placing the result in hi.
            const ::mp_limb_t cy_hi = limb_add_overflow(data1[1], cy_lo, &hi);
            if (mppp_unlikely(cy_hi)) {
                // Carry out of the second limb: does not fit in static storage.
                return false;
            }
            // Compute the new size. It can be 0, 1 or 2.
            rop._mp_size = hi ? 2 : (lo ? 1 : 0);
            // Write out.
            rdata[0] = lo;
            rdata[1] = hi;
        } else {
            // op1 is negative, l2 is non-negative. Compare their absolute values.
            // A 2-limb magnitude always exceeds the single-limb l2.
            if (asize1 == 2 || data1[0] >= l2) {
                // op1 is >= op2 in absolute value.
                const auto lo = data1[0] - l2;
                // Sub from hi the borrow.
                const auto hi = data1[1] - static_cast<::mp_limb_t>(data1[0] < l2);
                // The final size can be -2, -1 or 0.
                rop._mp_size = hi ? -2 : (lo ? -1 : 0);
                rdata[0] = lo;
                rdata[1] = hi;
            } else {
                // op2 > op1 in absolute value.
                // Size has to be 1.
                rop._mp_size = 1;
                rdata[0] = l2 - data1[0];
                rdata[1] = 0u;
            }
        }
        return true;
    }
2466  static bool static_add_ui(s_int &rop, const s_int &op1, unsigned long op2)
2467  {
2468  mpz_size_t asize1 = op1._mp_size;
2469  int sign1 = asize1 != 0;
2470  if (asize1 < 0) {
2471  asize1 = -asize1;
2472  sign1 = -1;
2473  }
2474  const bool retval = static_add_ui_impl(rop, op1, asize1, sign1, op2, static_add_ui_algo<s_int>{});
2475  if (static_add_ui_algo<s_int>::value == 0 && retval) {
2476  // If we used the mpn functions and we actually wrote into rop, then
2477  // make sure we zero out the unused limbs.
2478  rop.zero_unused_limbs();
2479  }
2480  return retval;
2481  }
2482 
2483 public:
2485 
    // Ternary addition with an unsigned long: rop = op1 + op2.
    friend void add_ui(mp_integer &rop, const mp_integer &op1, unsigned long op2)
    {
        if (std::numeric_limits<unsigned long>::max() > GMP_NUMB_MASK) {
            // For the optimised version below to kick in we need to be sure we can safely convert
            // unsigned long to an ::mp_limb_t, modulo nail bits. This because in the optimised version
            // we cast forcibly op2 to ::mp_limb_t. Otherwise, we just call add() after converting op2 to an
            // mp_integer.
            add(rop, op1, mp_integer{op2});
            return;
        }
        const bool sr = rop.is_static(), s1 = op1.is_static();
        if (mppp_likely(sr && s1)) {
            // Optimise the case of all statics.
            if (mppp_likely(static_add_ui(rop.m_int.g_st(), op1.m_int.g_st(), op2))) {
                return;
            }
        }
        // Static overflow (or dynamic operand): ensure rop is dynamic and use GMP.
        if (sr) {
            rop.m_int.promote(SSize + 1u);
        }
        ::mpz_add_ui(&rop.m_int.g_dy(), op1.get_mpz_view(), op2);
    }
2514 
2515 private:
    // The double limb multiplication optimization is available in the following cases:
    // - no nails, we are on a 64bit MSVC build, the limb type has exactly 64 bits and GMP_NUMB_BITS is 64,
    // - no nails, we have a 128bit unsigned available, the limb type has exactly 64 bits and GMP_NUMB_BITS is 64,
    // - no nails, the smallest 64 bit unsigned type has exactly 64 bits, the limb type has exactly 32 bits and
    // GMP_NUMB_BITS is 32.
    // NOTE: here we are checking that GMP_NUMB_BITS is the same as the limits ::digits property, which is probably
    // rather redundant on any conceivable architecture.
    using have_dlimb_mul = std::integral_constant<bool,
#if (defined(_MSC_VER) && defined(_WIN64) && (GMP_NUMB_BITS == 64)) || (defined(MPPP_UINT128) && (GMP_NUMB_BITS == 64))
                                                  !GMP_NAIL_BITS && std::numeric_limits<::mp_limb_t>::digits == 64
#elif GMP_NUMB_BITS == 32
                                                  !GMP_NAIL_BITS
                                                      && std::numeric_limits<std::uint_least64_t>::digits == 64
                                                      && std::numeric_limits<::mp_limb_t>::digits == 32
#else
                                                  false
#endif
                                                  >;
    // Algorithm selection for static multiplication: 1 or 2 for the double-limb
    // optimised paths (static size 1 resp. 2, when dlimb mul is available),
    // 0 for the generic GMP mpn fallback.
    template <typename SInt>
    using static_mul_algo = std::integral_constant<int, (SInt::s_size == 1 && have_dlimb_mul::value)
                                                            ? 1
                                                            : ((SInt::s_size == 2 && have_dlimb_mul::value) ? 2 : 0)>;
    // mpn implementation.
    // NOTE: this function (and the other overloads) returns 0 in case of success, otherwise it returns a hint
    // about the size in limbs of the result.
    static std::size_t static_mul_impl(s_int &rop, const s_int &op1, const s_int &op2, mpz_size_t asize1,
                                       mpz_size_t asize2, int sign1, int sign2, const std::integral_constant<int, 0> &)
    {
        using mppp_impl::copy_limbs_no;
        // Handle zeroes.
        if (mppp_unlikely(!sign1 || !sign2)) {
            rop._mp_size = 0;
            return 0u;
        }
        auto rdata = &rop.m_limbs[0];
        auto data1 = &op1.m_limbs[0], data2 = &op2.m_limbs[0];
        const auto max_asize = std::size_t(asize1 + asize2);
        // Temporary storage, to be used if we cannot write into rop.
        std::array<::mp_limb_t, SSize * 2u> res;
        // We can write directly into rop if these conditions hold:
        // - rop does not overlap with op1 and op2,
        // - SSize is large enough to hold the max size of the result.
        ::mp_limb_t *MPPP_RESTRICT res_data
            = (rdata != data1 && rdata != data2 && max_asize <= SSize) ? rdata : res.data();
        // Proceed to the multiplication.
        ::mp_limb_t hi;
        if (asize2 == 1) {
            // NOTE: the 1-limb versions do not write the hi limb, we have to write it ourselves.
            hi = ::mpn_mul_1(res_data, data1, static_cast<::mp_size_t>(asize1), data2[0]);
            res_data[asize1] = hi;
        } else if (asize1 == 1) {
            hi = ::mpn_mul_1(res_data, data2, static_cast<::mp_size_t>(asize2), data1[0]);
            res_data[asize2] = hi;
        } else if (asize1 == asize2) {
            ::mpn_mul_n(res_data, data1, data2, static_cast<::mp_size_t>(asize1));
            hi = res_data[2 * asize1 - 1];
        } else if (asize1 >= asize2) {
            // NOTE: mpn_mul() requires the first operand to be at least as large
            // as the second, hence the two symmetric branches.
            hi = ::mpn_mul(res_data, data1, static_cast<::mp_size_t>(asize1), data2, static_cast<::mp_size_t>(asize2));
        } else {
            hi = ::mpn_mul(res_data, data2, static_cast<::mp_size_t>(asize2), data1, static_cast<::mp_size_t>(asize1));
        }
        // The actual size: one less than the maximum if the top limb is zero.
        const std::size_t asize = max_asize - unsigned(hi == 0u);
        if (res_data == rdata) {
            // If we wrote directly into rop, it means that we had enough storage in it to begin with.
            rop._mp_size = mpz_size_t(asize);
            if (sign1 != sign2) {
                rop._mp_size = -rop._mp_size;
            }
            return 0u;
        }
        // If we used the temporary storage, we need to check if we can write into rop.
        if (asize > SSize) {
            // Not enough space, return the size.
            return asize;
        }
        // Enough space, set size and copy limbs.
        rop._mp_size = mpz_size_t(asize);
        if (sign1 != sign2) {
            rop._mp_size = -rop._mp_size;
        }
        copy_limbs_no(res_data, res_data + asize, rdata);
        return 0u;
    }
// Double-limb multiplication: return the low limb of op1 * op2 and store the
// high limb in *hi. Three platform-specific variants, gated on the same
// conditions as have_dlimb_mul above.
#if defined(_MSC_VER) && defined(_WIN64) && (GMP_NUMB_BITS == 64) && !GMP_NAIL_BITS
    // 64-bit MSVC: use the UnsignedMultiply128() intrinsic wrapper.
    static ::mp_limb_t dlimb_mul(::mp_limb_t op1, ::mp_limb_t op2, ::mp_limb_t *hi)
    {
        return ::UnsignedMultiply128(op1, op2, hi);
    }
#elif defined(MPPP_UINT128) && (GMP_NUMB_BITS == 64) && !GMP_NAIL_BITS
    // 64-bit limbs with a 128-bit unsigned type available.
    static ::mp_limb_t dlimb_mul(::mp_limb_t op1, ::mp_limb_t op2, ::mp_limb_t *hi)
    {
        using dlimb_t = MPPP_UINT128;
        const dlimb_t res = dlimb_t(op1) * op2;
        *hi = static_cast<::mp_limb_t>(res >> 64);
        return static_cast<::mp_limb_t>(res);
    }
#elif GMP_NUMB_BITS == 32 && !GMP_NAIL_BITS
    // 32-bit limbs: a 64-bit unsigned type holds the full product.
    static ::mp_limb_t dlimb_mul(::mp_limb_t op1, ::mp_limb_t op2, ::mp_limb_t *hi)
    {
        using dlimb_t = std::uint_least64_t;
        const dlimb_t res = dlimb_t(op1) * op2;
        *hi = static_cast<::mp_limb_t>(res >> 32);
        return static_cast<::mp_limb_t>(res);
    }
#endif
    // 1-limb optimization via dlimb.
    // Returns 0 on success, 2 when the product needs two limbs.
    static std::size_t static_mul_impl(s_int &rop, const s_int &op1, const s_int &op2, mpz_size_t, mpz_size_t,
                                       int sign1, int sign2, const std::integral_constant<int, 1> &)
    {
        ::mp_limb_t hi;
        const ::mp_limb_t lo = dlimb_mul(op1.m_limbs[0], op2.m_limbs[0], &hi);
        if (mppp_unlikely(hi)) {
            // A nonzero high limb does not fit in the single static limb.
            return 2u;
        }
        // Size is 1 unless the product is zero.
        const mpz_size_t asize = (lo != 0u);
        rop._mp_size = asize;
        if (sign1 != sign2) {
            rop._mp_size = -rop._mp_size;
        }
        rop.m_limbs[0] = lo;
        return 0u;
    }
    // 2-limb optimization via dlimb.
    // Returns 0 on success, 4 as a size hint on overflow.
    static std::size_t static_mul_impl(s_int &rop, const s_int &op1, const s_int &op2, mpz_size_t asize1,
                                       mpz_size_t asize2, int sign1, int sign2, const std::integral_constant<int, 2> &)
    {
        using mppp_impl::limb_add_overflow;
        if (mppp_unlikely(!asize1 || !asize2)) {
            // Handle zeroes.
            rop._mp_size = 0;
            rop.m_limbs[0] = 0u;
            rop.m_limbs[1] = 0u;
            return 0u;
        }
        if (asize1 == 1 && asize2 == 1) {
            // 1limb x 1limb: the double-limb product always fits.
            rop.m_limbs[0] = dlimb_mul(op1.m_limbs[0], op2.m_limbs[0], &rop.m_limbs[1]);
            rop._mp_size = static_cast<mpz_size_t>((asize1 + asize2) - mpz_size_t(rop.m_limbs[1] == 0u));
            if (sign1 != sign2) {
                rop._mp_size = -rop._mp_size;
            }
            return 0u;
        }
        if (asize1 != asize2) {
            // The only possibility of success is 2limbs x 1limb.
            //
            //                    b      a X
            //                           c
            // --------------------
            // tmp[2] tmp[1] tmp[0]
            //
            ::mp_limb_t a = op1.m_limbs[0], b = op1.m_limbs[1], c = op2.m_limbs[0];
            if (asize1 < asize2) {
                // Switch them around if needed.
                a = op2.m_limbs[0], b = op2.m_limbs[1], c = op1.m_limbs[0];
            }
            ::mp_limb_t ca_lo, ca_hi, cb_lo, cb_hi, tmp0, tmp1, tmp2;
            // (b,a) * c = c*a + (c*b << GMP_NUMB_BITS).
            ca_lo = dlimb_mul(c, a, &ca_hi);
            cb_lo = dlimb_mul(c, b, &cb_hi);
            tmp0 = ca_lo;
            const auto cy = limb_add_overflow(cb_lo, ca_hi, &tmp1);
            tmp2 = cb_hi + cy;
            // Now determine the size. asize must be at least 2.
            const mpz_size_t asize = 2 + mpz_size_t(tmp2 != 0u);
            if (asize == 2) {
                // Size is good, write out the result.
                rop._mp_size = asize;
                if (sign1 != sign2) {
                    rop._mp_size = -rop._mp_size;
                }
                rop.m_limbs[0] = tmp0;
                rop.m_limbs[1] = tmp1;
                return 0u;
            }
        }
        // Return 4 as a size hint. Real size could be 3, but GMP will require 4 limbs
        // of storage to perform the operation anyway.
        return 4u;
    }
2695  static std::size_t static_mul(s_int &rop, const s_int &op1, const s_int &op2)
2696  {
2697  // Cache a few quantities, detect signs.
2698  mpz_size_t asize1 = op1._mp_size, asize2 = op2._mp_size;
2699  int sign1 = asize1 != 0, sign2 = asize2 != 0;
2700  if (asize1 < 0) {
2701  asize1 = -asize1;
2702  sign1 = -1;
2703  }
2704  if (asize2 < 0) {
2705  asize2 = -asize2;
2706  sign2 = -1;
2707  }
2708  const std::size_t retval
2709  = static_mul_impl(rop, op1, op2, asize1, asize2, sign1, sign2, static_mul_algo<s_int>{});
2710  if (static_mul_algo<s_int>::value == 0 && retval == 0u) {
2711  rop.zero_unused_limbs();
2712  }
2713  return retval;
2714  }
2715  // Dispatching for the binary multiplication operator.
2716  static mp_integer dispatch_binary_mul(const mp_integer &op1, const mp_integer &op2)
2717  {
2718  mp_integer retval;
2719  mul(retval, op1, op2);
2720  return retval;
2721  }
2722  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
2723  static mp_integer dispatch_binary_mul(const mp_integer &op1, T n)
2724  {
2725  // NOTE: with respect to addition, here we separate the retval
2726  // from the operands. Having a separate destination is generally better
2727  // for multiplication.
2728  mp_integer retval;
2729  mul(retval, op1, mp_integer{n});
2730  return retval;
2731  }
    // Binary operator* dispatching: supported integral * mp_integer.
    // Multiplication commutes, defer to the mp_integer-on-the-left overload.
    template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
    static mp_integer dispatch_binary_mul(T n, const mp_integer &op2)
    {
        return dispatch_binary_mul(op2, n);
    }
    // Binary operator* dispatching: mp_integer * supported floating-point. The
    // computation is carried out (and returned) in the floating-point type T.
    template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
    static T dispatch_binary_mul(const mp_integer &op1, T x)
    {
        return static_cast<T>(op1) * x;
    }
    // Binary operator* dispatching: supported floating-point * mp_integer.
    // Commutative, defer to the mp_integer-on-the-left overload.
    template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
    static T dispatch_binary_mul(T x, const mp_integer &op2)
    {
        return dispatch_binary_mul(op2, x);
    }
    // Dispatching for in-place multiplication.
    // mp_integer *= mp_integer: delegate to the ternary mul().
    static void dispatch_in_place_mul(mp_integer &retval, const mp_integer &n)
    {
        mul(retval, retval, n);
    }
    // mp_integer *= supported integral: promote the integral to a temporary
    // mp_integer and delegate to the ternary mul().
    template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
    static void dispatch_in_place_mul(mp_integer &retval, const T &n)
    {
        mul(retval, retval, mp_integer{n});
    }
    // mp_integer *= supported floating-point: perform the multiplication in T,
    // then assign back (the assignment converts the float result to mp_integer).
    template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
    static void dispatch_in_place_mul(mp_integer &retval, const T &x)
    {
        retval = static_cast<T>(retval) * x;
    }
2762 
2763 public:
2765 
    // Ternary multiplication: rop = op1 * op2.
    friend void mul(mp_integer &rop, const mp_integer &op1, const mp_integer &op2)
    {
        const bool sr = rop.is_static(), s1 = op1.is_static(), s2 = op2.is_static();
        std::size_t size_hint = 0u;
        if (mppp_likely(sr && s1 && s2)) {
            // Optimise the case of all statics; a nonzero return value is a
            // size hint for the dynamic fallback below.
            size_hint = static_mul(rop.m_int.g_st(), op1.m_int.g_st(), op2.m_int.g_st());
            if (mppp_likely(size_hint == 0u)) {
                return;
            }
        }
        if (sr) {
            // We use the size hint from the static_mul if available, otherwise a normal promotion will take place.
            // NOTE: here the best way of proceeding would be to calculate the max size of the result based on
            // the op1/op2 sizes, but for whatever reason this computation has disastrous performance consequences
            // on micro-benchmarks. We need to understand if that's the case in real-world scenarios as well, and
            // revisit this.
            rop.m_int.promote(size_hint);
        }
        ::mpz_mul(&rop.m_int.g_dy(), op1.get_mpz_view(), op2.get_mpz_view());
    }
2793 
    // Binary multiplication operator: defined for any (T, U) combination for
    // which common_t<T, U> is a valid type; dispatch selects the concrete overload.
    template <typename T, typename U>
    friend common_t<T, U> operator*(const T &op1, const U &op2)
    {
        return dispatch_binary_mul(op1, op2);
    }
2805 
    // In-place multiplication operator: mp_integer *= op, for any supported
    // interoperable type (see in_place_enabler). Returns *this.
    template <typename T, in_place_enabler<T> = 0>
    mp_integer &operator*=(const T &op)
    {
        dispatch_in_place_mul(*this, op);
        return *this;
    }
// In-place multiplication with an interoperable type on the left: x *= n. Same
// MSVC return-type SFINAE workaround as for operator+= above.
#if defined(_MSC_VER)
    template <typename T>
    friend in_place_lenabler<T> operator*=(T &x, const mp_integer &n)
#else

    template <typename T, in_place_lenabler<T> = 0>
    friend T &operator*=(T &x, const mp_integer &n)
#endif
    {
        // For integral T the cast is a checked mp_integer -> T conversion (may
        // fail on overflow); for float T it is a no-op cast of a float result.
        return x = static_cast<T>(x * n);
    }
2847 
2848 private:
    // Selection of the algorithm for addmul: if optimised algorithms exist for both add and mul, then use the
    // optimised addmul algos. Otherwise, use the mpn one.
    template <typename SInt>
    using static_addmul_algo
        = std::integral_constant<int,
                                 (static_add_algo<SInt>::value == 2 && static_mul_algo<SInt>::value == 2)
                                     ? 2
                                     : ((static_add_algo<SInt>::value == 1 && static_mul_algo<SInt>::value == 1) ? 1
                                                                                                                 : 0)>;
    // NOTE: same return value as mul: 0 for success, otherwise a hint for the size of the result.
    // Generic mpn implementation of rop += op1 * op2, built from the mpn mul and add impls.
    static std::size_t static_addmul_impl(s_int &rop, const s_int &op1, const s_int &op2, mpz_size_t asizer,
                                          mpz_size_t asize1, mpz_size_t asize2, int signr, int sign1, int sign2,
                                          const std::integral_constant<int, 0> &)
    {
        // First try to do the static prod, if it does not work it's a failure.
        s_int prod;
        if (mppp_unlikely(
                static_mul_impl(prod, op1, op2, asize1, asize2, sign1, sign2, std::integral_constant<int, 0>{}))) {
            // This is the maximum size a static addmul can have.
            return SSize * 2u + 1u;
        }
        // Determine sign and asize of the product.
        mpz_size_t asize_prod = prod._mp_size;
        int sign_prod = (asize_prod != 0);
        if (asize_prod < 0) {
            asize_prod = -asize_prod;
            sign_prod = -1;
        }
        // Try to do the add.
        if (mppp_unlikely(!static_add_impl(rop, rop, prod, asizer, asize_prod, signr, sign_prod,
                                           std::integral_constant<int, 0>{}))) {
            return SSize + 1u;
        }
        return 0u;
    }
    // 1-limb optimised implementation of rop += op1 * op2 via dlimb_mul.
    // Returns 0 on success, otherwise a size hint (2 or 3).
    static std::size_t static_addmul_impl(s_int &rop, const s_int &op1, const s_int &op2, mpz_size_t, mpz_size_t,
                                          mpz_size_t, int signr, int sign1, int sign2,
                                          const std::integral_constant<int, 1> &)
    {
        using mppp_impl::limb_add_overflow;
        // First we do op1 * op2.
        ::mp_limb_t tmp;
        const ::mp_limb_t prod = dlimb_mul(op1.m_limbs[0], op2.m_limbs[0], &tmp);
        if (mppp_unlikely(tmp)) {
            // Product spills into a second limb: cannot fit.
            return 3u;
        }
        // Determine the sign of the product: 0, 1 or -1.
        int sign_prod = prod != 0u;
        if (sign1 != sign2) {
            sign_prod = -sign_prod;
        }
        // Now add/sub.
        if (signr == sign_prod) {
            // Same sign, do addition with overflow check.
            if (mppp_unlikely(limb_add_overflow(rop.m_limbs[0], prod, &tmp))) {
                return 2u;
            }
            // Assign the output.
            rop._mp_size = signr;
            rop.m_limbs[0] = tmp;
        } else {
            // When the signs differ, we need to implement addition as a subtraction.
            if (rop.m_limbs[0] >= prod) {
                // abs(rop) >= abs(prod).
                tmp = rop.m_limbs[0] - prod;
                // asize is either 1 or 0 (0 iff rop == prod).
                rop._mp_size = signr;
                if (mppp_unlikely(!tmp)) {
                    rop._mp_size = 0;
                }
                rop.m_limbs[0] = tmp;
            } else {
                // NOTE: this cannot be zero, as rop and prod cannot be equal.
                rop._mp_size = sign_prod;
                rop.m_limbs[0] = prod - rop.m_limbs[0];
            }
        }
        return 0u;
    }
    // 2-limb optimised implementation of rop += op1 * op2 via dlimb_mul.
    // Returns 0 on success, otherwise a size hint (3, 4 or 5).
    static std::size_t static_addmul_impl(s_int &rop, const s_int &op1, const s_int &op2, mpz_size_t asizer,
                                          mpz_size_t asize1, mpz_size_t asize2, int signr, int sign1, int sign2,
                                          const std::integral_constant<int, 2> &)
    {
        using mppp_impl::limb_add_overflow;
        if (mppp_unlikely(!asize1 || !asize2)) {
            // If op1 or op2 are zero, rop will be unchanged.
            return 0u;
        }
        // Handle op1 * op2.
        std::array<::mp_limb_t, 2> prod;
        int sign_prod = 1;
        if (sign1 != sign2) {
            sign_prod = -1;
        }
        mpz_size_t asize_prod;
        if (asize1 == 1 && asize2 == 1) {
            // 1limb x 1limb: the double-limb product always fits.
            prod[0] = dlimb_mul(op1.m_limbs[0], op2.m_limbs[0], &prod[1]);
            asize_prod = (asize1 + asize2) - mpz_size_t(prod[1] == 0u);
        } else {
            // The only possibility of success is 2limbs x 1limb.
            //
            //               b       a X
            //                       c
            // ---------------
            // prod[1] prod[0]
            //
            if (mppp_unlikely(asize1 == asize2)) {
                // This means that both sizes are 2.
                return 5u;
            }
            ::mp_limb_t a = op1.m_limbs[0], b = op1.m_limbs[1], c = op2.m_limbs[0];
            if (asize1 < asize2) {
                // Switch them around if needed.
                a = op2.m_limbs[0], b = op2.m_limbs[1], c = op1.m_limbs[0];
            }
            ::mp_limb_t ca_hi, cb_hi;
            // These are the three operations that build the result, two mults, one add.
            prod[0] = dlimb_mul(c, a, &ca_hi);
            prod[1] = dlimb_mul(c, b, &cb_hi);
            // NOTE: we can use this with overlapping arguments because the first two
            // are passed as copies.
            const auto cy = limb_add_overflow(prod[1], ca_hi, &prod[1]);
            // Check if the third limb exists, if so we return failure.
            if (mppp_unlikely(cb_hi || cy)) {
                return 4u;
            }
            asize_prod = 2;
        }
        // Proceed to the addition.
        if (signr == sign_prod) {
            // Add the hi and lo limbs.
            ::mp_limb_t lo, hi1, hi2;
            const ::mp_limb_t cy_lo = limb_add_overflow(rop.m_limbs[0], prod[0], &lo),
                              cy_hi1 = limb_add_overflow(rop.m_limbs[1], prod[1], &hi1),
                              cy_hi2 = limb_add_overflow(hi1, cy_lo, &hi2);
            // The result will overflow if we have any carry relating to the high limb.
            if (mppp_unlikely(cy_hi1 || cy_hi2)) {
                return 3u;
            }
            // For the size computation:
            // - cannot be zero, as prod is not zero,
            // - if signr == +-1, then the size is either +-1 or +-2: asize is 2 if the result
            // has a nonzero 2nd limb, otherwise asize is 1.
            rop._mp_size = signr;
            if (hi2) {
                rop._mp_size = signr + signr;
            }
            rop.m_limbs[0] = lo;
            rop.m_limbs[1] = hi2;
        } else {
            // When the signs differ, we need to implement addition as a subtraction.
            if (asizer > asize_prod
                || (asizer == asize_prod && compare_limbs_2(&rop.m_limbs[0], &prod[0], asizer) >= 0)) {
                // rop >= prod in absolute value.
                const auto lo = rop.m_limbs[0] - prod[0];
                // If there's a borrow, then hi1 > hi2, otherwise we would have a negative result.
                assert(rop.m_limbs[0] >= prod[0] || rop.m_limbs[1] > prod[1]);
                // This can never wrap around, at most it goes to zero.
                const auto hi = rop.m_limbs[1] - prod[1] - static_cast<::mp_limb_t>(rop.m_limbs[0] < prod[0]);
                // asize can be 0, 1 or 2.
                rop._mp_size = 0;
                if (hi) {
                    rop._mp_size = signr + signr;
                } else if (lo) {
                    rop._mp_size = signr;
                }
                rop.m_limbs[0] = lo;
                rop.m_limbs[1] = hi;
            } else {
                // prod > rop in absolute value.
                const auto lo = prod[0] - rop.m_limbs[0];
                assert(prod[0] >= rop.m_limbs[0] || prod[1] > rop.m_limbs[1]);
                const auto hi = prod[1] - rop.m_limbs[1] - static_cast<::mp_limb_t>(prod[0] < rop.m_limbs[0]);
                // asize can be 1 or 2, but not zero as we know abs(prod) != abs(rop).
                rop._mp_size = sign_prod;
                if (hi) {
                    rop._mp_size = sign_prod + sign_prod;
                }
                rop.m_limbs[0] = lo;
                rop.m_limbs[1] = hi;
            }
        }
        return 0u;
    }
3033  static std::size_t static_addmul(s_int &rop, const s_int &op1, const s_int &op2)
3034  {
3035  mpz_size_t asizer = rop._mp_size, asize1 = op1._mp_size, asize2 = op2._mp_size;
3036  int signr = asizer != 0, sign1 = asize1 != 0, sign2 = asize2 != 0;
3037  if (asizer < 0) {
3038  asizer = -asizer;
3039  signr = -1;
3040  }
3041  if (asize1 < 0) {
3042  asize1 = -asize1;
3043  sign1 = -1;
3044  }
3045  if (asize2 < 0) {
3046  asize2 = -asize2;
3047  sign2 = -1;
3048  }
3049  const std::size_t retval = static_addmul_impl(rop, op1, op2, asizer, asize1, asize2, signr, sign1, sign2,
3050  static_addmul_algo<s_int>{});
3051  if (static_addmul_algo<s_int>::value == 0 && retval == 0u) {
3052  rop.zero_unused_limbs();
3053  }
3054  return retval;
3055  }
3056 
3057 public:
3059 
3066  friend void addmul(mp_integer &rop, const mp_integer &op1, const mp_integer &op2)
3067  {
3068  const bool sr = rop.is_static(), s1 = op1.is_static(), s2 = op2.is_static();
3069  std::size_t size_hint = 0u;
3070  if (mppp_likely(sr && s1 && s2)) {
3071  size_hint = static_addmul(rop.m_int.g_st(), op1.m_int.g_st(), op2.m_int.g_st());
3072  if (mppp_likely(size_hint == 0u)) {
3073  return;
3074  }
3075  }
3076  if (sr) {
3077  rop.m_int.promote(size_hint);
3078  }
3079  ::mpz_addmul(&rop.m_int.g_dy(), op1.get_mpz_view(), op2.get_mpz_view());
3080  }
3081 
3082 private:
3083  // Detect the presence of dual-limb division. This is currently possible only if:
3084  // - we are on a 32bit build (with the usual constraints that the types have exactly 32/64 bits and no nails),
3085  // - we are on a 64bit build and we have the 128bit int type available (plus usual constraints).
3086  // MSVC currently does not provide any primitive for 128bit division.
3087  using have_dlimb_div = std::integral_constant<bool,
3088 #if defined(MPPP_UINT128) && (GMP_NUMB_BITS == 64)
3089  !GMP_NAIL_BITS && std::numeric_limits<::mp_limb_t>::digits == 64
3090 #elif GMP_NUMB_BITS == 32
3091  !GMP_NAIL_BITS
3092  && std::numeric_limits<std::uint_least64_t>::digits == 64
3093  && std::numeric_limits<::mp_limb_t>::digits == 32
3094 #else
3095  false
3096 #endif
3097  >;
3098  // Selection of algorithm for static division:
3099  // - for 1 limb, we can always do static division,
3100  // - for 2 limbs, we need the dual limb division if available,
3101  // - otherwise we just use the mpn functions.
// Algo values: 0 = generic mpn implementation, 1 = 1-limb optimisation, 2 = 2-limb optimisation.
3102  template <typename SInt>
3103  using static_div_algo
3104  = std::integral_constant<int, SInt::s_size == 1 ? 1 : ((SInt::s_size == 2 && have_dlimb_div::value) ? 2 : 0)>;
3105  // mpn implementation.
// Truncated division via the GMP mpn API: q = trunc(op1 / op2), r = op1 - q * op2.
// Preconditions (enforced by the caller): op2 is nonzero, q and r are distinct
// objects, asize1/asize2 are the absolute limb sizes and sign1/sign2 the signs
// of op1/op2.
3106  static void static_div_impl(s_int &q, s_int &r, const s_int &op1, const s_int &op2, mpz_size_t asize1,
3107  mpz_size_t asize2, int sign1, int sign2, const std::integral_constant<int, 0> &)
3108  {
3109  using mppp_impl::copy_limbs_no;
3110  // First we check if the divisor is larger than the dividend (in abs limb size), as the mpn function
3111  // requires asize1 >= asize2.
3112  if (asize2 > asize1) {
// |op1| < |op2|, hence the quotient is zero and the remainder is op1 itself.
3113  // Copy op1 into the remainder.
3114  r = op1;
3115  // Zero out q.
3116  q._mp_size = 0;
3117  return;
3118  }
3119  // We need to take care of potentially overlapping arguments. We know that q and r are distinct, but op1
3120  // could overlap with q or r, and op2 could overlap with op1, q or r.
3121  std::array<::mp_limb_t, SSize> op1_alt, op2_alt;
3122  const ::mp_limb_t *data1 = op1.m_limbs.data();
3123  const ::mp_limb_t *data2 = op2.m_limbs.data();
3124  if (&op1 == &q || &op1 == &r) {
3125  copy_limbs_no(data1, data1 + asize1, op1_alt.data());
3126  data1 = op1_alt.data();
3127  }
3128  if (&op2 == &q || &op2 == &r || &op1 == &op2) {
3129  copy_limbs_no(data2, data2 + asize2, op2_alt.data());
3130  data2 = op2_alt.data();
3131  }
3132  // Small helper function to verify that all pointers are distinct. Used exclusively for debugging purposes.
3133  auto distinct_op = [&q, &r, data1, data2]() -> bool {
3134  const ::mp_limb_t *ptrs[] = {q.m_limbs.data(), r.m_limbs.data(), data1, data2};
3135  std::sort(ptrs, ptrs + 4, std::less<const ::mp_limb_t *>());
3136  return std::unique(ptrs, ptrs + 4) == (ptrs + 4);
3137  };
// Silence the unused-variable warning in release builds, where assert() compiles away.
3138  (void)distinct_op;
3139  assert(distinct_op());
3140  // Proceed to the division.
3141  if (asize2 == 1) {
3142  // Optimization when the divisor has 1 limb.
// mpn_divrem_1() writes the asize1-limb quotient into q and returns the remainder.
3143  r.m_limbs[0]
3144  = ::mpn_divrem_1(q.m_limbs.data(), ::mp_size_t(0), data1, static_cast<::mp_size_t>(asize1), data2[0]);
3145  } else {
3146  // General implementation.
// mpn_tdiv_qr() requires asize1 >= asize2 >= 1 (established above) and writes
// an (asize1 - asize2 + 1)-limb quotient and an asize2-limb remainder.
3147  ::mpn_tdiv_qr(q.m_limbs.data(), r.m_limbs.data(), ::mp_size_t(0), data1, static_cast<::mp_size_t>(asize1),
3148  data2, static_cast<::mp_size_t>(asize2));
3149  }
3150  // Complete the quotient: compute size and sign.
// Start from the maximum possible size and strip zero top limbs.
3151  q._mp_size = asize1 - asize2 + 1;
3152  while (q._mp_size && !(q.m_limbs[static_cast<std::size_t>(q._mp_size - 1)] & GMP_NUMB_MASK)) {
3153  --q._mp_size;
3154  }
// The quotient is negative iff the operands' signs differ.
3155  if (sign1 != sign2) {
3156  q._mp_size = -q._mp_size;
3157  }
3158  // Complete the remainder.
3159  r._mp_size = asize2;
3160  while (r._mp_size && !(r.m_limbs[static_cast<std::size_t>(r._mp_size - 1)] & GMP_NUMB_MASK)) {
3161  --r._mp_size;
3162  }
// Truncated division: the remainder takes the sign of the dividend.
3163  if (sign1 == -1) {
3164  r._mp_size = -r._mp_size;
3165  }
3166  }
3167  // 1-limb optimisation.
3168  static void static_div_impl(s_int &q, s_int &r, const s_int &op1, const s_int &op2, mpz_size_t, mpz_size_t,
3169  int sign1, int sign2, const std::integral_constant<int, 1> &)
3170  {
3171  // NOTE: here we have to use GMP_NUMB_MASK because if s_size is 1 this implementation is *always*
3172  // called, even if we have nail bits (whereas the optimisation for other operations currently kicks in
3173  // only without nail bits). Thus, we need to discard from m_limbs[0] the nail bits before doing the
3174  // division.
3175  const ::mp_limb_t q_ = (op1.m_limbs[0] & GMP_NUMB_MASK) / (op2.m_limbs[0] & GMP_NUMB_MASK),
3176  r_ = (op1.m_limbs[0] & GMP_NUMB_MASK) % (op2.m_limbs[0] & GMP_NUMB_MASK);
3177  // Write q.
3178  q._mp_size = (q_ != 0u);
3179  if (sign1 != sign2) {
3180  q._mp_size = -q._mp_size;
3181  }
3182  // NOTE: there should be no need here to mask.
3183  q.m_limbs[0] = q_;
3184  // Write r.
3185  r._mp_size = (r_ != 0u);
3186  // Following C++11, the sign of r is the sign of op1:
3187  // http://stackoverflow.com/questions/13100711/operator-modulo-change-in-c-11
3188  if (sign1 == -1) {
3189  r._mp_size = -r._mp_size;
3190  }
3191  r.m_limbs[0] = r_;
3192  }
3193 #if defined(MPPP_UINT128) && (GMP_NUMB_BITS == 64) && !GMP_NAIL_BITS
3194  static void dlimb_div(::mp_limb_t op11, ::mp_limb_t op12, ::mp_limb_t op21, ::mp_limb_t op22,
3195  ::mp_limb_t *MPPP_RESTRICT q1, ::mp_limb_t *MPPP_RESTRICT q2, ::mp_limb_t *MPPP_RESTRICT r1,
3196  ::mp_limb_t *MPPP_RESTRICT r2)
3197  {
3198  using dlimb_t = MPPP_UINT128;
3199  const auto op1 = op11 + (dlimb_t(op12) << 64);
3200  const auto op2 = op21 + (dlimb_t(op22) << 64);
3201  const auto q = op1 / op2, r = op1 % op2;
3202  *q1 = static_cast<::mp_limb_t>(q & ::mp_limb_t(-1));
3203  *q2 = static_cast<::mp_limb_t>(q >> 64);
3204  *r1 = static_cast<::mp_limb_t>(r & ::mp_limb_t(-1));
3205  *r2 = static_cast<::mp_limb_t>(r >> 64);
3206  }
3207  // Without remainder.
3208  static void dlimb_div(::mp_limb_t op11, ::mp_limb_t op12, ::mp_limb_t op21, ::mp_limb_t op22,
3209  ::mp_limb_t *MPPP_RESTRICT q1, ::mp_limb_t *MPPP_RESTRICT q2)
3210  {
3211  using dlimb_t = MPPP_UINT128;
3212  const auto op1 = op11 + (dlimb_t(op12) << 64);
3213  const auto op2 = op21 + (dlimb_t(op22) << 64);
3214  const auto q = op1 / op2;
3215  *q1 = static_cast<::mp_limb_t>(q & ::mp_limb_t(-1));
3216  *q2 = static_cast<::mp_limb_t>(q >> 64);
3217  }
3218 #elif GMP_NUMB_BITS == 32 && !GMP_NAIL_BITS
3219  static void dlimb_div(::mp_limb_t op11, ::mp_limb_t op12, ::mp_limb_t op21, ::mp_limb_t op22,
3220  ::mp_limb_t *MPPP_RESTRICT q1, ::mp_limb_t *MPPP_RESTRICT q2, ::mp_limb_t *MPPP_RESTRICT r1,
3221  ::mp_limb_t *MPPP_RESTRICT r2)
3222  {
3223  using dlimb_t = std::uint_least64_t;
3224  const auto op1 = op11 + (dlimb_t(op12) << 32);
3225  const auto op2 = op21 + (dlimb_t(op22) << 32);
3226  const auto q = op1 / op2, r = op1 % op2;
3227  *q1 = static_cast<::mp_limb_t>(q & ::mp_limb_t(-1));
3228  *q2 = static_cast<::mp_limb_t>(q >> 32);
3229  *r1 = static_cast<::mp_limb_t>(r & ::mp_limb_t(-1));
3230  *r2 = static_cast<::mp_limb_t>(r >> 32);
3231  }
3232  static void dlimb_div(::mp_limb_t op11, ::mp_limb_t op12, ::mp_limb_t op21, ::mp_limb_t op22,
3233  ::mp_limb_t *MPPP_RESTRICT q1, ::mp_limb_t *MPPP_RESTRICT q2)
3234  {
3235  using dlimb_t = std::uint_least64_t;
3236  const auto op1 = op11 + (dlimb_t(op12) << 32);
3237  const auto op2 = op21 + (dlimb_t(op22) << 32);
3238  const auto q = op1 / op2;
3239  *q1 = static_cast<::mp_limb_t>(q & ::mp_limb_t(-1));
3240  *q2 = static_cast<::mp_limb_t>(q >> 32);
3241  }
3242 #endif
3243  // 2-limbs optimisation.
3244  static void static_div_impl(s_int &q, s_int &r, const s_int &op1, const s_int &op2, mpz_size_t asize1,
3245  mpz_size_t asize2, int sign1, int sign2, const std::integral_constant<int, 2> &)
3246  {
3247  if (asize1 < 2 && asize2 < 2) {
3248  // NOTE: testing indicates that it pays off to optimize the case in which the operands have
3249  // fewer than 2 limbs. This a slightly modified version of the 1-limb division from above,
3250  // without the need to mask as this function is called only if there are no nail bits.
3251  const ::mp_limb_t q_ = op1.m_limbs[0] / op2.m_limbs[0], r_ = op1.m_limbs[0] % op2.m_limbs[0];
3252  q._mp_size = (q_ != 0u);
3253  if (sign1 != sign2) {
3254  q._mp_size = -q._mp_size;
3255  }
3256  q.m_limbs[0] = q_;
3257  q.m_limbs[1] = 0u;
3258  r._mp_size = (r_ != 0u);
3259  if (sign1 == -1) {
3260  r._mp_size = -r._mp_size;
3261  }
3262  r.m_limbs[0] = r_;
3263  r.m_limbs[1] = 0u;
3264  return;
3265  }
3266  // Perform the division.
3267  ::mp_limb_t q1, q2, r1, r2;
3268  dlimb_div(op1.m_limbs[0], op1.m_limbs[1], op2.m_limbs[0], op2.m_limbs[1], &q1, &q2, &r1, &r2);
3269  // Write out.
3270  q._mp_size = q2 ? 2 : (q1 ? 1 : 0);
3271  if (sign1 != sign2) {
3272  q._mp_size = -q._mp_size;
3273  }
3274  q.m_limbs[0] = q1;
3275  q.m_limbs[1] = q2;
3276  r._mp_size = r2 ? 2 : (r1 ? 1 : 0);
3277  if (sign1 == -1) {
3278  r._mp_size = -r._mp_size;
3279  }
3280  r.m_limbs[0] = r1;
3281  r.m_limbs[1] = r2;
3282  }
3283  static void static_div(s_int &q, s_int &r, const s_int &op1, const s_int &op2)
3284  {
3285  mpz_size_t asize1 = op1._mp_size, asize2 = op2._mp_size;
3286  int sign1 = asize1 != 0, sign2 = asize2 != 0;
3287  if (asize1 < 0) {
3288  asize1 = -asize1;
3289  sign1 = -1;
3290  }
3291  if (asize2 < 0) {
3292  asize2 = -asize2;
3293  sign2 = -1;
3294  }
3295  static_div_impl(q, r, op1, op2, asize1, asize2, sign1, sign2, static_div_algo<s_int>{});
3296  if (static_div_algo<s_int>::value == 0) {
3297  q.zero_unused_limbs();
3298  r.zero_unused_limbs();
3299  }
3300  }
3301  // Dispatching for the binary division operator.
3302  static mp_integer dispatch_binary_div(const mp_integer &op1, const mp_integer &op2)
3303  {
3304  mp_integer retval, r;
3305  tdiv_qr(retval, r, op1, op2);
3306  return retval;
3307  }
3308  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
3309  static mp_integer dispatch_binary_div(const mp_integer &op1, T n)
3310  {
3311  mp_integer retval, r;
3312  tdiv_qr(retval, r, op1, mp_integer{n});
3313  return retval;
3314  }
3315  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
3316  static mp_integer dispatch_binary_div(T n, const mp_integer &op2)
3317  {
3318  mp_integer retval, r;
3319  tdiv_qr(retval, r, mp_integer{n}, op2);
3320  return retval;
3321  }
3322  template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
3323  static T dispatch_binary_div(const mp_integer &op1, T x)
3324  {
3325  return static_cast<T>(op1) / x;
3326  }
3327  template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
3328  static T dispatch_binary_div(T x, const mp_integer &op2)
3329  {
3330  return x / static_cast<T>(op2);
3331  }
3332  // Dispatching for in-place div.
3333  static void dispatch_in_place_div(mp_integer &retval, const mp_integer &n)
3334  {
3335  mp_integer r;
3336  tdiv_qr(retval, r, retval, n);
3337  }
3338  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
3339  static void dispatch_in_place_div(mp_integer &retval, const T &n)
3340  {
3341  mp_integer r;
3342  tdiv_qr(retval, r, retval, mp_integer{n});
3343  }
3344  template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
3345  static void dispatch_in_place_div(mp_integer &retval, const T &x)
3346  {
3347  retval = static_cast<T>(retval) / x;
3348  }
3349  // Enablers for modulo operators. They are special because they don't accept floating-point.
// Return type of the binary modulo operator: the common type of T and U,
// enabled only when neither operand is floating-point.
3350  template <typename T, typename U>
3351  using common_mod_t
3352  = enable_if_t<conjunction<negation<std::is_floating_point<T>>, negation<std::is_floating_point<U>>>::value,
3353  common_t<T, U>>;
// Enabler for the in-place modulo operator: supported integral types and mp_integer itself.
3354  template <typename T>
3355  using in_place_mod_enabler
3356  = enable_if_t<disjunction<is_supported_integral<T>, std::is_same<T, mp_integer>>::value, int>;
3357  // Dispatching for the binary modulo operator.
3358  static mp_integer dispatch_binary_mod(const mp_integer &op1, const mp_integer &op2)
3359  {
3360  mp_integer q, retval;
3361  tdiv_qr(q, retval, op1, op2);
3362  return retval;
3363  }
3364  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
3365  static mp_integer dispatch_binary_mod(const mp_integer &op1, T n)
3366  {
3367  mp_integer q, retval;
3368  tdiv_qr(q, retval, op1, mp_integer{n});
3369  return retval;
3370  }
3371  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
3372  static mp_integer dispatch_binary_mod(T n, const mp_integer &op2)
3373  {
3374  mp_integer q, retval;
3375  tdiv_qr(q, retval, mp_integer{n}, op2);
3376  return retval;
3377  }
3378  // Dispatching for in-place mod.
3379  static void dispatch_in_place_mod(mp_integer &retval, const mp_integer &n)
3380  {
3381  mp_integer q;
3382  tdiv_qr(q, retval, retval, n);
3383  }
3384  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
3385  static void dispatch_in_place_mod(mp_integer &retval, const T &n)
3386  {
3387  mp_integer q;
3388  tdiv_qr(q, retval, retval, mp_integer{n});
3389  }
3390 
3391 public:
3393 
3405  friend void tdiv_qr(mp_integer &q, mp_integer &r, const mp_integer &n, const mp_integer &d)
3406  {
3407  if (mppp_unlikely(&q == &r)) {
3408  throw std::invalid_argument("When performing a division with remainder, the quotient 'q' and the "
3409  "remainder 'r' must be distinct objects");
3410  }
3411  if (mppp_unlikely(d.sgn() == 0)) {
3412  throw zero_division_error("Integer division by zero");
3413  }
3414  const bool sq = q.is_static(), sr = r.is_static(), s1 = n.is_static(), s2 = d.is_static();
3415  if (mppp_likely(sq && sr && s1 && s2)) {
3416  static_div(q.m_int.g_st(), r.m_int.g_st(), n.m_int.g_st(), d.m_int.g_st());
3417  // Division can never fail.
3418  return;
3419  }
3420  if (sq) {
3421  q.m_int.promote();
3422  }
3423  if (sr) {
3424  r.m_int.promote();
3425  }
3426  ::mpz_tdiv_qr(&q.m_int.g_dy(), &r.m_int.g_dy(), n.get_mpz_view(), d.get_mpz_view());
3427  }
3429 
// Binary division operator (truncated towards zero). The dispatch_binary_div()
// overloads select the implementation based on the operand types; the result
// type follows common_t<T, U>.
3437  template <typename T, typename U>
3438  friend common_t<T, U> operator/(const T &n, const U &d)
3439  {
3440  return dispatch_binary_div(n, d);
3441  }
3443 
// In-place division operator.
3452  template <typename T, in_place_enabler<T> = 0>
3453  mp_integer &operator/=(const T &d)
3454  {
3455  dispatch_in_place_div(*this, d);
3456  return *this;
3457  }
// In-place division of an interoperable type by an mp_integer.
// NOTE(review): on MSVC the SFINAE constraint is placed in the return type
// rather than in a default template parameter, matching the pattern used for
// the shift operators below.
3458 #if defined(_MSC_VER)
3459  template <typename T>
3460  friend in_place_lenabler<T> operator/=(T &x, const mp_integer &n)
3461 #else
3462 
3481  template <typename T, in_place_lenabler<T> = 0>
3482  friend T &operator/=(T &x, const mp_integer &n)
3483 #endif
3484  {
// The division is performed as mp_integer/floating-point arithmetic, then
// converted back to T.
3485  return x = static_cast<T>(x / n);
3486  }
3488 
// Binary modulo operator: enabled only when neither operand is floating-point
// (see common_mod_t).
3496  template <typename T, typename U>
3497  friend common_mod_t<T, U> operator%(const T &n, const U &d)
3498  {
3499  return dispatch_binary_mod(n, d);
3500  }
3502 
// In-place modulo operator.
3509  template <typename T, in_place_mod_enabler<T> = 0>
3510  mp_integer &operator%=(const T &d)
3511  {
3512  dispatch_in_place_mod(*this, d);
3513  return *this;
3514  }
3515 
3516 private:
3517  // Selection of the algorithm for static mul_2exp.
// Algo values: 0 = generic mpn implementation, 1 = 1-limb optimisation, 2 = 2-limb optimisation.
3518  template <typename SInt>
3519  using static_mul_2exp_algo = std::integral_constant<int, SInt::s_size == 1 ? 1 : (SInt::s_size == 2 ? 2 : 0)>;
3520  // mpn implementation.
// Left shift rop = n * 2**s in static storage. Returns 0 on success, otherwise
// a size hint (in limbs) for the promotion of rop.
3521  static std::size_t static_mul_2exp_impl(s_int &rop, const s_int &n, ::mp_bitcnt_t s,
3522  const std::integral_constant<int, 0> &)
3523  {
3524  using mppp_impl::copy_limbs_no;
3525  mpz_size_t asize = n._mp_size;
3526  if (s == 0u || asize == 0) {
3527  // If shift is zero, or the operand is zero, write n into rop and return success.
3528  rop = n;
3529  return 0u;
3530  }
3531  // Finish setting up asize and sign.
3532  int sign = asize != 0;
3533  if (asize < 0) {
3534  asize = -asize;
3535  sign = -1;
3536  }
3537  // ls: number of entire limbs shifted.
3538  // rs: effective shift that will be passed to the mpn function.
3539  const auto ls = s / GMP_NUMB_BITS, rs = s % GMP_NUMB_BITS;
3540  // At the very minimum, the new asize will be the old asize
3541  // plus ls.
3542  const mpz_size_t new_asize = asize + static_cast<mpz_size_t>(ls);
3543  if (std::size_t(new_asize) < SSize) {
3544  // In this case the operation will always succeed, and we can write directly into rop.
// There is room for one extra limb above new_asize, so any carry from
// mpn_lshift() can be stored as well.
3545  ::mp_limb_t ret = 0u;
3546  if (rs) {
3547  // Perform the shift via the mpn function, if we are effectively shifting at least 1 bit.
3548  // Overlapping is fine, as it is guaranteed that rop.m_limbs.data() + ls >= n.m_limbs.data().
3549  ret = ::mpn_lshift(rop.m_limbs.data() + ls, n.m_limbs.data(), static_cast<::mp_size_t>(asize),
3550  unsigned(rs));
3551  // Write bits spilling out.
3552  rop.m_limbs[std::size_t(new_asize)] = ret;
3553  } else {
3554  // Otherwise, just copy over (the mpn function requires the shift to be at least 1).
3555  // NOTE: we have to use move_backward here because the ranges are overlapping and they do not
3556  // start at the same pointer (in which case we could've used copy_limbs()). Here we know ls is not
3557  // zero: we don't have a remainder, and s == 0 was already handled above. Hence, new_asize > asize.
3558  assert(new_asize > asize);
3559  std::move_backward(n.m_limbs.begin(), n.m_limbs.begin() + asize, rop.m_limbs.begin() + new_asize);
3560  }
3561  // Zero the lower limbs vacated by the shift.
3562  std::fill(rop.m_limbs.data(), rop.m_limbs.data() + ls, ::mp_limb_t(0));
3563  // Set the size.
// The extra limb counts only if mpn_lshift() produced a nonzero carry.
3564  rop._mp_size = new_asize + (ret != 0u);
3565  if (sign == -1) {
3566  rop._mp_size = -rop._mp_size;
3567  }
3568  return 0u;
3569  }
3570  if (std::size_t(new_asize) == SSize) {
// Borderline case: the result fits only if mpn_lshift() does not carry out.
3571  if (rs) {
3572  // In this case the operation may fail, so we need to write to temporary storage.
3573  std::array<::mp_limb_t, SSize> tmp;
3574  if (::mpn_lshift(tmp.data(), n.m_limbs.data(), static_cast<::mp_size_t>(asize), unsigned(rs))) {
3575  return SSize + 1u;
3576  }
3577  // The shift was successful without spill over, copy the content from the tmp
3578  // storage into rop.
3579  copy_limbs_no(tmp.data(), tmp.data() + asize, rop.m_limbs.data() + ls);
3580  } else {
3581  // If we shifted by a multiple of the limb size, then we can write directly to rop.
3582  assert(new_asize > asize);
3583  std::move_backward(n.m_limbs.begin(), n.m_limbs.begin() + asize, rop.m_limbs.begin() + new_asize);
3584  }
3585  // Zero the lower limbs vacated by the shift.
3586  std::fill(rop.m_limbs.data(), rop.m_limbs.data() + ls, ::mp_limb_t(0));
3587  // Set the size.
3588  rop._mp_size = new_asize;
3589  if (sign == -1) {
3590  rop._mp_size = -rop._mp_size;
3591  }
3592  return 0u;
3593  }
3594  // Shift is too much, the size will overflow the static limit.
3595  // Return a hint for the size of the result.
3596  return std::size_t(new_asize) + 1u;
3597  }
3598  // 1-limb optimisation.
3599  static std::size_t static_mul_2exp_impl(s_int &rop, const s_int &n, ::mp_bitcnt_t s,
3600  const std::integral_constant<int, 1> &)
3601  {
3602  const ::mp_limb_t l = n.m_limbs[0] & GMP_NUMB_MASK;
3603  if (s == 0u || l == 0u) {
3604  // If shift is zero, or the operand is zero, write n into rop and return success.
3605  rop = n;
3606  return 0u;
3607  }
3608  if (mppp_unlikely(s >= GMP_NUMB_BITS || (l >> (GMP_NUMB_BITS - s)))) {
3609  // The two conditions:
3610  // - if the shift is >= number of data bits, the operation will certainly
3611  // fail (as the operand is nonzero);
3612  // - shifting by s would overflow the single limb.
3613  // NOTE: s is at least 1, so in the right shift above we never risk UB due to too much shift.
3614  // NOTE: for the size hint: s / nbits is the number of entire limbs shifted, +1 because the shifted
3615  // limbs add to the current size (1), +1 because another limb might be needed.
3616  return std::size_t(s) / GMP_NUMB_BITS + 2u;
3617  }
3618  // Write out.
3619  rop.m_limbs[0] = l << s;
3620  // Size is the same as the input value.
3621  rop._mp_size = n._mp_size;
3622  return 0u;
3623  }
3624  // 2-limb optimisation.
// Left shift rop = n * 2**s for 2-limb static ints. Returns 0 on success,
// otherwise a size hint (in limbs) for the promotion of rop.
3625  static std::size_t static_mul_2exp_impl(s_int &rop, const s_int &n, ::mp_bitcnt_t s,
3626  const std::integral_constant<int, 2> &)
3627  {
3628  mpz_size_t asize = n._mp_size;
3629  if (s == 0u || asize == 0) {
// Zero shift or zero operand: copy n into rop and report success.
3630  rop = n;
3631  return 0u;
3632  }
// Split the signed size into absolute size + sign.
3633  int sign = asize != 0;
3634  if (asize < 0) {
3635  asize = -asize;
3636  sign = -1;
3637  }
3638  // Too much shift, this can never work on a nonzero value.
// The static_assert guarantees that 2u * GMP_NUMB_BITS below cannot overflow.
3639  static_assert(GMP_NUMB_BITS
3640  < std::numeric_limits<typename std::decay<decltype(GMP_NUMB_BITS)>::type>::max() / 2u,
3641  "Overflow error.");
3642  if (mppp_unlikely(s >= 2u * GMP_NUMB_BITS)) {
3643  // NOTE: this is the generic formula to estimate the final size.
3644  return std::size_t(s) / GMP_NUMB_BITS + 1u + std::size_t(asize);
3645  }
3646  if (s == GMP_NUMB_BITS) {
3647  // This case can be dealt with moving lo into hi, but only if asize is 1.
3648  if (mppp_unlikely(asize == 2)) {
3649  // asize is 2, shift is too much.
3650  return 3u;
3651  }
3652  rop.m_limbs[1u] = n.m_limbs[0u];
3653  rop.m_limbs[0u] = 0u;
3654  // The size has to be 2.
3655  rop._mp_size = 2;
3656  if (sign == -1) {
3657  rop._mp_size = -2;
3658  }
3659  return 0u;
3660  }
3661  // Temp hi lo limbs to store the result that will eventually go into rop.
3662  ::mp_limb_t lo = n.m_limbs[0u], hi = n.m_limbs[1u];
3663  if (s > GMP_NUMB_BITS) {
// A shift larger than one limb can only succeed on a 1-limb value.
3664  if (mppp_unlikely(asize == 2)) {
3665  return std::size_t(s) / GMP_NUMB_BITS + 1u + std::size_t(asize);
3666  }
3667  // Move lo to hi and set lo to zero.
3668  hi = n.m_limbs[0u];
3669  lo = 0u;
3670  // Update the shift.
3671  s = static_cast<::mp_bitcnt_t>(s - GMP_NUMB_BITS);
3672  }
3673  // Check that hi will not be shifted too much. Note that
3674  // here and below s can never be zero, so we never shift too much.
3675  assert(s > 0u && s < GMP_NUMB_BITS);
3676  if (mppp_unlikely((hi & GMP_NUMB_MASK) >> (GMP_NUMB_BITS - s))) {
// Nonzero bits would spill out of the second limb: a third limb is needed.
3677  return 3u;
3678  }
3679  // Shift hi and lo. hi gets the carry over from lo.
3680  hi = ((hi & GMP_NUMB_MASK) << s) + ((lo & GMP_NUMB_MASK) >> (GMP_NUMB_BITS - s));
3681  // NOTE: here the result needs to be masked as well as the shift could
3682  // end up writing in nail bits.
3683  lo = ((lo & GMP_NUMB_MASK) << s) & GMP_NUMB_MASK;
3684  // Write rop.
3685  rop.m_limbs[0u] = lo;
3686  rop.m_limbs[1u] = hi;
3687  // asize is at least 1.
3688  rop._mp_size = 1 + (hi != 0u);
3689  if (sign == -1) {
3690  rop._mp_size = -rop._mp_size;
3691  }
3692  return 0u;
3693  }
3694  static std::size_t static_mul_2exp(s_int &rop, const s_int &n, ::mp_bitcnt_t s)
3695  {
3696  const std::size_t retval = static_mul_2exp_impl(rop, n, s, static_mul_2exp_algo<s_int>{});
3697  if (static_mul_2exp_algo<s_int>::value == 0 && retval == 0u) {
3698  rop.zero_unused_limbs();
3699  }
3700  return retval;
3701  }
3702 
3703 public:
3705 
3712  friend void mul_2exp(mp_integer &rop, const mp_integer &n, ::mp_bitcnt_t s)
3713  {
3714  const bool sr = rop.is_static(), sn = n.is_static();
3715  std::size_t size_hint = 0u;
3716  if (mppp_likely(sr && sn)) {
3717  size_hint = static_mul_2exp(rop.m_int.g_st(), n.m_int.g_st(), s);
3718  if (mppp_likely(size_hint == 0u)) {
3719  return;
3720  }
3721  }
3722  if (sr) {
3723  rop.m_int.promote(size_hint);
3724  }
3725  ::mpz_mul_2exp(&rop.m_int.g_dy(), n.get_mpz_view(), s);
3726  }
// Left-shift operator: returns n * 2**s.
// NOTE(review): on MSVC the SFINAE constraint is placed in the return type
// rather than in a default template parameter.
3727 #if defined(_MSC_VER)
3728  template <typename T>
3729  friend shift_op_enabler<T> operator<<(const mp_integer &n, T s)
3730 #else
3731 
3740  template <typename T, shift_op_enabler<T> = 0>
3741  friend mp_integer operator<<(const mp_integer &n, T s)
3742 #endif
3743  {
3744  mp_integer retval;
// cast_to_bitcnt() converts the shift amount to ::mp_bitcnt_t.
3745  mul_2exp(retval, n, cast_to_bitcnt(s));
3746  return retval;
3747  }
// In-place left-shift operator.
3748 #if defined(_MSC_VER)
3749  template <typename T>
3750  shift_op_enabler<T> &operator<<=(T s)
3751 #else
3752 
3760  template <typename T, shift_op_enabler<T> = 0>
3762 #endif
3763  {
3764  mul_2exp(*this, *this, cast_to_bitcnt(s));
3765  return *this;
3766  }
3767 
3768 private:
3769  // Selection of the algorithm for static tdiv_q_2exp.
// Algo values: 0 = generic mpn implementation, 1 = 1-limb optimisation, 2 = 2-limb optimisation.
3770  template <typename SInt>
3771  using static_tdiv_q_2exp_algo = std::integral_constant<int, SInt::s_size == 1 ? 1 : (SInt::s_size == 2 ? 2 : 0)>;
3772  // mpn implementation.
// Truncated right shift rop = n / 2**s (rounding towards zero). Unlike the
// left shift, this can never fail, hence the void return.
3773  static void static_tdiv_q_2exp_impl(s_int &rop, const s_int &n, ::mp_bitcnt_t s,
3774  const std::integral_constant<int, 0> &)
3775  {
3776  mpz_size_t asize = n._mp_size;
3777  if (s == 0u || asize == 0) {
3778  // If shift is zero, or the operand is zero, write n into rop and return.
3779  rop = n;
3780  return;
3781  }
3782  // Finish setting up asize and sign.
3783  int sign = asize != 0;
3784  if (asize < 0) {
3785  asize = -asize;
3786  sign = -1;
3787  }
3788  // ls: number of entire limbs shifted.
3789  // rs: effective shift that will be passed to the mpn function.
3790  const auto ls = s / GMP_NUMB_BITS, rs = s % GMP_NUMB_BITS;
3791  if (ls >= std::size_t(asize)) {
3792  // If we shift by a number of entire limbs equal to or larger than the asize,
3793  // the result will be zero.
3794  rop._mp_size = 0;
3795  return;
3796  }
3797  // The maximum new asize will be the old asize minus ls.
3798  const mpz_size_t new_asize = asize - static_cast<mpz_size_t>(ls);
3799  if (rs) {
3800  // Perform the shift via the mpn function, if we are effectively shifting at least 1 bit.
3801  // Overlapping is fine, as it is guaranteed that rop.m_limbs.data() <= n.m_limbs.data() + ls.
3802  ::mpn_rshift(rop.m_limbs.data(), n.m_limbs.data() + ls, static_cast<::mp_size_t>(new_asize), unsigned(rs));
3803  } else {
3804  // Otherwise, just copy over (the mpn function requires the shift to be at least 1).
3805  // NOTE: std::move requires that the destination iterator is not within the input range.
3806  // Here we are sure that ls > 0 because we don't have a remainder, and the zero shift case
3807  // was already handled above.
3808  assert(ls);
3809  std::move(n.m_limbs.begin() + ls, n.m_limbs.begin() + asize, rop.m_limbs.begin());
3810  }
3811  // Set the size. We need to check if the top limb is zero.
3812  rop._mp_size = new_asize - ((rop.m_limbs[std::size_t(new_asize - 1)] & GMP_NUMB_MASK) == 0u);
// Restore the sign of the operand (truncated division towards zero).
3813  if (sign == -1) {
3814  rop._mp_size = -rop._mp_size;
3815  }
3816  }
3817  // 1-limb optimisation.
3818  static void static_tdiv_q_2exp_impl(s_int &rop, const s_int &n, ::mp_bitcnt_t s,
3819  const std::integral_constant<int, 1> &)
3820  {
3821  const ::mp_limb_t l = n.m_limbs[0] & GMP_NUMB_MASK;
3822  if (s == 0u || l == 0u) {
3823  rop = n;
3824  return;
3825  }
3826  if (s >= GMP_NUMB_BITS) {
3827  // We are shifting by the limb's bit size or greater, the result will be zero.
3828  rop._mp_size = 0;
3829  rop.m_limbs[0] = 0u;
3830  return;
3831  }
3832  // Compute the result.
3833  const auto res = l >> s;
3834  // Size is the original one or zero.
3835  rop._mp_size = res ? n._mp_size : 0;
3836  rop.m_limbs[0] = res;
3837  }
3838  // 2-limb optimisation.
// Truncated right shift rop = n / 2**s for 2-limb static ints. Cannot fail.
3839  static void static_tdiv_q_2exp_impl(s_int &rop, const s_int &n, ::mp_bitcnt_t s,
3840  const std::integral_constant<int, 2> &)
3841  {
3842  mpz_size_t asize = n._mp_size;
3843  if (s == 0u || asize == 0) {
// Zero shift or zero operand: copy n into rop.
3844  rop = n;
3845  return;
3846  }
// Split the signed size into absolute size + sign.
3847  int sign = asize != 0;
3848  if (asize < 0) {
3849  asize = -asize;
3850  sign = -1;
3851  }
3852  // If shift is too large, zero the result and return.
// The static_assert guarantees that 2u * GMP_NUMB_BITS below cannot overflow.
3853  static_assert(GMP_NUMB_BITS
3854  < std::numeric_limits<typename std::decay<decltype(GMP_NUMB_BITS)>::type>::max() / 2u,
3855  "Overflow error.");
3856  if (s >= 2u * GMP_NUMB_BITS) {
3857  rop._mp_size = 0;
3858  rop.m_limbs[0u] = 0u;
3859  rop.m_limbs[1u] = 0u;
3860  return;
3861  }
3862  if (s >= GMP_NUMB_BITS) {
3863  // NOTE: here the effective shift < GMP_NUMB_BITS, otherwise it would have been caught
3864  // in the check above.
// The low limb is discarded entirely; only the (shifted) high limb survives.
3865  const auto lo = (n.m_limbs[1u] & GMP_NUMB_MASK) >> (s - GMP_NUMB_BITS);
3866  // The size could be zero or +-1, depending
3867  // on the new content of m_limbs[0] and the previous
3868  // sign of _mp_size.
3869  rop._mp_size = lo ? sign : 0;
3870  rop.m_limbs[0u] = lo;
3871  rop.m_limbs[1u] = 0u;
3872  return;
3873  }
3874  assert(s > 0u && s < GMP_NUMB_BITS);
3875  // This represents the bits in hi that will be shifted down into lo.
3876  // We move them up so we can add tmp to the new lo to account for them.
3877  // NOTE: mask the final result to avoid spillover into potential nail bits.
3878  const auto tmp = ((n.m_limbs[1u] & GMP_NUMB_MASK) << (GMP_NUMB_BITS - s)) & GMP_NUMB_MASK;
3879  rop.m_limbs[0u] = ((n.m_limbs[0u] & GMP_NUMB_MASK) >> s) + tmp;
3880  rop.m_limbs[1u] = (n.m_limbs[1u] & GMP_NUMB_MASK) >> s;
3881  // The effective shift was less than 1 entire limb. The new asize must be the old one,
3882  // or one less than that.
3883  rop._mp_size = asize - ((rop.m_limbs[std::size_t(asize - 1)] & GMP_NUMB_MASK) == 0u);
3884  if (sign == -1) {
3885  rop._mp_size = -rop._mp_size;
3886  }
3887  }
3888  static void static_tdiv_q_2exp(s_int &rop, const s_int &n, ::mp_bitcnt_t s)
3889  {
3890  static_tdiv_q_2exp_impl(rop, n, s, static_tdiv_q_2exp_algo<s_int>{});
3891  if (static_tdiv_q_2exp_algo<s_int>::value == 0) {
3892  rop.zero_unused_limbs();
3893  }
3894  }
3895 
3896 public:
3898 
3906  friend void tdiv_q_2exp(mp_integer &rop, const mp_integer &n, ::mp_bitcnt_t s)
3907  {
3908  const bool sr = rop.is_static(), sn = n.is_static();
3909  if (mppp_likely(sr && sn)) {
3910  static_tdiv_q_2exp(rop.m_int.g_st(), n.m_int.g_st(), s);
3911  return;
3912  }
3913  if (sr) {
3914  rop.m_int.promote();
3915  }
3916  ::mpz_tdiv_q_2exp(&rop.m_int.g_dy(), n.get_mpz_view(), s);
3917  }
#if defined(_MSC_VER)
    // MSVC workaround: SFINAE via the return type instead of a non-type template parameter.
    template <typename T>
    friend shift_op_enabler<T> operator>>(const mp_integer &n, T s)
#else
    /// Right shift operator: returns n / 2**s, truncated towards zero.
    /// Enabled only for the types accepted by shift_op_enabler.
    template <typename T, shift_op_enabler<T> = 0>
    friend mp_integer operator>>(const mp_integer &n, T s)
#endif
    {
        mp_integer retval;
        // NOTE(review): cast_to_bitcnt() converts s to ::mp_bitcnt_t — presumably it
        // validates the value (defined elsewhere in this file); confirm its error behaviour.
        tdiv_q_2exp(retval, n, cast_to_bitcnt(s));
        return retval;
    }
3939 #if defined(_MSC_VER)
3940  template <typename T>
3941  shift_op_enabler<T> &operator>>=(T s)
3942 #else
3943 
3951  template <typename T, shift_op_enabler<T> = 0>
3953 #endif
3954  {
3955  tdiv_q_2exp(*this, *this, cast_to_bitcnt(s));
3956  return *this;
3957  }
3958 
3959 private:
    // Selection of the algorithm for static cmp: 1- and 2-limb specialisations,
    // 0 selects the generic mpn-based implementation.
    template <typename SInt>
    using static_cmp_algo = std::integral_constant<int, SInt::s_size == 1 ? 1 : (SInt::s_size == 2 ? 2 : 0)>;
    // mpn implementation.
    static int static_cmp(const s_int &n1, const s_int &n2, const std::integral_constant<int, 0> &)
    {
        // The signed sizes order the values directly: a negative value is smaller
        // than any nonnegative one, and for equal signs a longer magnitude dominates.
        if (n1._mp_size < n2._mp_size) {
            return -1;
        }
        if (n2._mp_size < n1._mp_size) {
            return 1;
        }
        // The two sizes are equal, compare the absolute values.
        const auto asize = n1._mp_size >= 0 ? n1._mp_size : -n1._mp_size;
        if (asize == 0) {
            // Both operands are zero.
            // NOTE: we do this special casing in order to avoid calling mpn_cmp() on zero operands. It seems to
            // work, but the official GMP docs say one is not supposed to call mpn functions on zero operands.
            return 0;
        }
        const int cmp_abs = ::mpn_cmp(n1.m_limbs.data(), n2.m_limbs.data(), static_cast<::mp_size_t>(asize));
        // If the values are non-negative, return the comparison of the absolute values, otherwise invert it.
        return (n1._mp_size >= 0) ? cmp_abs : -cmp_abs;
    }
3984  // 1-limb optimisation.
3985  static int static_cmp(const s_int &n1, const s_int &n2, const std::integral_constant<int, 1> &)
3986  {
3987  if (n1._mp_size < n2._mp_size) {
3988  return -1;
3989  }
3990  if (n2._mp_size < n1._mp_size) {
3991  return 1;
3992  }
3993  int cmp_abs = (n1.m_limbs[0u] & GMP_NUMB_MASK) > (n2.m_limbs[0u] & GMP_NUMB_MASK);
3994  if (!cmp_abs) {
3995  cmp_abs = -static_cast<int>((n1.m_limbs[0u] & GMP_NUMB_MASK) < (n2.m_limbs[0u] & GMP_NUMB_MASK));
3996  }
3997  return (n1._mp_size >= 0) ? cmp_abs : -cmp_abs;
3998  }
3999  // 2-limb optimisation.
4000  static int static_cmp(const s_int &n1, const s_int &n2, const std::integral_constant<int, 2> &)
4001  {
4002  if (n1._mp_size < n2._mp_size) {
4003  return -1;
4004  }
4005  if (n2._mp_size < n1._mp_size) {
4006  return 1;
4007  }
4008  auto asize = n1._mp_size >= 0 ? n1._mp_size : -n1._mp_size;
4009  while (asize != 0) {
4010  --asize;
4011  if ((n1.m_limbs[std::size_t(asize)] & GMP_NUMB_MASK) > (n2.m_limbs[std::size_t(asize)] & GMP_NUMB_MASK)) {
4012  return (n1._mp_size >= 0) ? 1 : -1;
4013  }
4014  if ((n1.m_limbs[std::size_t(asize)] & GMP_NUMB_MASK) < (n2.m_limbs[std::size_t(asize)] & GMP_NUMB_MASK)) {
4015  return (n1._mp_size >= 0) ? -1 : 1;
4016  }
4017  }
4018  return 0;
4019  }
    // Equality operator.
    // NOTE: special implementation instead of using cmp, this should be faster.
    static bool dispatch_equality(const mp_integer &a, const mp_integer &b)
    {
        // NOTE: the size member is part of the common initial sequence, so it can be
        // read regardless of which union member (static/dynamic) is active.
        const mp_size_t size_a = a.m_int.m_st._mp_size, size_b = b.m_int.m_st._mp_size;
        if (size_a != size_b) {
            // Different signed sizes: the values differ.
            return false;
        }
        const ::mp_limb_t *ptr_a, *ptr_b;
        std::size_t asize;
        if (a.is_static()) {
            ptr_a = a.m_int.g_st().m_limbs.data();
            asize = static_cast<std::size_t>((size_a >= 0) ? size_a : -size_a);
        } else {
            ptr_a = a.m_int.g_dy()._mp_d;
            asize = ::mpz_size(&a.m_int.g_dy());
        }
        if (b.is_static()) {
            ptr_b = b.m_int.g_st().m_limbs.data();
        } else {
            ptr_b = b.m_int.g_dy()._mp_d;
        }
        // Compare limb by limb, masking away potential nail bits.
        auto limb_cmp
            = [](const ::mp_limb_t &l1, const ::mp_limb_t &l2) { return (l1 & GMP_NUMB_MASK) == (l2 & GMP_NUMB_MASK); };
#if defined(_MSC_VER)
        // MSVC: use checked iterators to silence the unchecked-pointer warnings.
        return std::equal(stdext::make_checked_array_iterator(ptr_a, asize),
                          stdext::make_checked_array_iterator(ptr_a, asize) + asize,
                          stdext::make_checked_array_iterator(ptr_b, asize), limb_cmp);
#else
        return std::equal(ptr_a, ptr_a + asize, ptr_b, limb_cmp);
#endif
    }
4052  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
4053  static bool dispatch_equality(const mp_integer &a, T n)
4054  {
4055  return dispatch_equality(a, mp_integer{n});
4056  }
4057  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
4058  static bool dispatch_equality(T n, const mp_integer &a)
4059  {
4060  return dispatch_equality(a, n);
4061  }
4062  template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
4063  static bool dispatch_equality(const mp_integer &a, T x)
4064  {
4065  return static_cast<T>(a) == x;
4066  }
4067  template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
4068  static bool dispatch_equality(T x, const mp_integer &a)
4069  {
4070  return dispatch_equality(a, x);
4071  }
4072  // Less-than operator.
4073  static bool dispatch_less_than(const mp_integer &a, const mp_integer &b)
4074  {
4075  return cmp(a, b) < 0;
4076  }
4077  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
4078  static bool dispatch_less_than(const mp_integer &a, T n)
4079  {
4080  return dispatch_less_than(a, mp_integer{n});
4081  }
4082  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
4083  static bool dispatch_less_than(T n, const mp_integer &a)
4084  {
4085  return dispatch_greater_than(a, mp_integer{n});
4086  }
4087  template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
4088  static bool dispatch_less_than(const mp_integer &a, T x)
4089  {
4090  return static_cast<T>(a) < x;
4091  }
4092  template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
4093  static bool dispatch_less_than(T x, const mp_integer &a)
4094  {
4095  return dispatch_greater_than(a, x);
4096  }
4097  // Greater-than operator.
4098  static bool dispatch_greater_than(const mp_integer &a, const mp_integer &b)
4099  {
4100  return cmp(a, b) > 0;
4101  }
4102  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
4103  static bool dispatch_greater_than(const mp_integer &a, T n)
4104  {
4105  return dispatch_greater_than(a, mp_integer{n});
4106  }
4107  template <typename T, enable_if_t<is_supported_integral<T>::value, int> = 0>
4108  static bool dispatch_greater_than(T n, const mp_integer &a)
4109  {
4110  return dispatch_less_than(a, mp_integer{n});
4111  }
4112  template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
4113  static bool dispatch_greater_than(const mp_integer &a, T x)
4114  {
4115  return static_cast<T>(a) > x;
4116  }
4117  template <typename T, enable_if_t<is_supported_float<T>::value, int> = 0>
4118  static bool dispatch_greater_than(T x, const mp_integer &a)
4119  {
4120  return dispatch_less_than(a, x);
4121  }
// The enabler for relational operators.
#if defined(_MSC_VER)
    // MSVC workaround: the enabler is used as the return type (bool on success).
    template <typename T, typename U>
    using rel_enabler = enable_if_t<!std::is_same<void, common_t<T, U>>::value, bool>;
#else
    // Standard form: used as a defaulted non-type template parameter (int on success).
    template <typename T, typename U>
    using rel_enabler = enable_if_t<!std::is_same<void, common_t<T, U>>::value, int>;
#endif
4130 
4131 public:
4133 
4140  friend int cmp(const mp_integer &op1, const mp_integer &op2)
4141  {
4142  const bool s1 = op1.is_static(), s2 = op2.is_static();
4143  if (mppp_likely(s1 && s2)) {
4144  return static_cmp(op1.m_int.g_st(), op2.m_int.g_st(), static_cmp_algo<s_int>{});
4145  }
4146  return ::mpz_cmp(op1.get_mpz_view(), op2.get_mpz_view());
4147  }
#if defined(_MSC_VER)
    template <typename T, typename U>
    friend rel_enabler<T, U> operator==(const T &op1, const U &op2)
#else
    /// Equality operator (enabled when T and U have a common type).
    template <typename T, typename U, rel_enabler<T, U> = 0>
    friend bool operator==(const T &op1, const U &op2)
#endif
    {
        return dispatch_equality(op1, op2);
    }
#if defined(_MSC_VER)
    template <typename T, typename U>
    friend rel_enabler<T, U> operator!=(const T &op1, const U &op2)
#else
    /// Inequality operator, defined as the negation of operator==().
    template <typename T, typename U, rel_enabler<T, U> = 0>
    friend bool operator!=(const T &op1, const U &op2)
#endif
    {
        return !(op1 == op2);
    }
#if defined(_MSC_VER)
    template <typename T, typename U>
    friend rel_enabler<T, U> operator<(const T &op1, const U &op2)
#else
    /// Less-than operator.
    template <typename T, typename U, rel_enabler<T, U> = 0>
    friend bool operator<(const T &op1, const U &op2)
#endif
    {
        return dispatch_less_than(op1, op2);
    }
#if defined(_MSC_VER)
    template <typename T, typename U>
    friend rel_enabler<T, U> operator>=(const T &op1, const U &op2)
#else
    /// Greater-than or equal operator, defined as the negation of operator<().
    template <typename T, typename U, rel_enabler<T, U> = 0>
    friend bool operator>=(const T &op1, const U &op2)
#endif
    {
        return !(op1 < op2);
    }
#if defined(_MSC_VER)
    template <typename T, typename U>
    friend rel_enabler<T, U> operator>(const T &op1, const U &op2)
#else
    /// Greater-than operator.
    template <typename T, typename U, rel_enabler<T, U> = 0>
    friend bool operator>(const T &op1, const U &op2)
#endif
    {
        return dispatch_greater_than(op1, op2);
    }
#if defined(_MSC_VER)
    template <typename T, typename U>
    friend rel_enabler<T, U> operator<=(const T &op1, const U &op2)
#else
    /// Less-than or equal operator, defined as the negation of operator>().
    template <typename T, typename U, rel_enabler<T, U> = 0>
    friend bool operator<=(const T &op1, const U &op2)
#endif
    {
        return !(op1 > op2);
    }
4251 
4258  friend void pow_ui(mp_integer &rop, const mp_integer &base, unsigned long exp)
4259  {
4260  if (rop.is_static()) {
4261  MPPP_MAYBE_TLS mpz_raii tmp;
4262  ::mpz_pow_ui(&tmp.m_mpz, base.get_mpz_view(), exp);
4263  rop = mp_integer(&tmp.m_mpz);
4264  } else {
4265  ::mpz_pow_ui(&rop.m_int.g_dy(), base.get_mpz_view(), exp);
4266  }
4267  }
4269 
4275  friend mp_integer pow_ui(const mp_integer &base, unsigned long exp)
4276  {
4277  mp_integer retval;
4278  pow_ui(retval, base, exp);
4279  return retval;
4280  }
4281 
4282 private:
4283  template <typename T, enable_if_t<std::is_integral<T>::value, int> = 0>
4284  static bool exp_nonnegative(const T &exp)
4285  {
4286  return exp >= T(0);
4287  }
4288  static bool exp_nonnegative(const mp_integer &exp)
4289  {
4290  return exp.sgn() >= 0;
4291  }
4292  template <typename T, enable_if_t<std::is_integral<T>::value, int> = 0>
4293  static unsigned long exp_to_ulong(const T &exp)
4294  {
4295  assert(exp >= T(0));
4296  // NOTE: make_unsigned<T>::type is T if T is already unsigned.
4297  if (mppp_unlikely(static_cast<typename std::make_unsigned<T>::type>(exp)
4298  > std::numeric_limits<unsigned long>::max())) {
4299  throw std::overflow_error("Cannot convert the integral value " + std::to_string(exp)
4300  + " to unsigned long: the value is too large.");
4301  }
4302  return static_cast<unsigned long>(exp);
4303  }
4304  static unsigned long exp_to_ulong(const mp_integer &exp)
4305  {
4306  try {
4307  return static_cast<unsigned long>(exp);
4308  } catch (const std::overflow_error &) {
4309  // Rewrite the error message.
4310  throw std::overflow_error("Cannot convert the integral value " + exp.to_string()
4311  + " to unsigned long: the value is too large.");
4312  }
4313  }
4314  template <typename T, enable_if_t<std::is_integral<T>::value, int> = 0>
4315  static bool base_is_zero(const T &base)
4316  {
4317  return base == T(0);
4318  }
4319  static bool base_is_zero(const mp_integer &base)
4320  {
4321  return base.is_zero();
4322  }
4323  template <typename T, enable_if_t<std::is_integral<T>::value, int> = 0>
4324  static bool exp_is_odd(const T &exp)
4325  {
4326  return (exp % T(2)) != T(0);
4327  }
4328  static bool exp_is_odd(const mp_integer &exp)
4329  {
4330  return exp.odd_p();
4331  }
4332  template <typename T, enable_if_t<std::is_integral<T>::value, int> = 0>
4333  static std::string exp_to_string(const T &exp)
4334  {
4335  return std::to_string(exp);
4336  }
4337  static std::string exp_to_string(const mp_integer &exp)
4338  {
4339  return exp.to_string();
4340  }
    // Implementation of pow().
    // mp_integer -- integral overload (the exponent may be negative).
    template <typename T, enable_if_t<disjunction<std::is_same<T, mp_integer>, std::is_integral<T>>::value, int> = 0>
    static mp_integer pow_impl(const mp_integer &base, const T &exp)
    {
        mp_integer rop;
        if (exp_nonnegative(exp)) {
            pow_ui(rop, base, exp_to_ulong(exp));
        } else if (mppp_unlikely(base_is_zero(base))) {
            // 0**-n is a division by zero.
            throw zero_division_error("cannot raise zero to the negative power " + exp_to_string(exp));
        } else if (base.is_one()) {
            // 1**(-n) == 1.
            rop = 1;
        } else if (base.is_negative_one()) {
            if (exp_is_odd(exp)) {
                // (-1)**(-2n-1) == -1.
                rop = -1;
            } else {
                // (-1)**(-2n) == 1.
                rop = 1;
            }
        } else {
            // |m| > 1: m**-n == truncation of 1 / m**n == 0.
            rop = 0;
        }
        return rop;
    }
    // C++ integral -- mp_integer overload: promote the base and delegate.
    template <typename T, enable_if_t<std::is_integral<T>::value, int> = 0>
    static mp_integer pow_impl(const T &base, const mp_integer &exp)
    {
        return pow_impl(mp_integer{base}, exp);
    }
    // mp_integer -- FP overload: delegate to std::pow after conversion.
    template <typename T, enable_if_t<std::is_floating_point<T>::value, int> = 0>
    static T pow_impl(const mp_integer &base, const T &exp)
    {
        return std::pow(static_cast<T>(base), exp);
    }
    // FP -- mp_integer overload: delegate to std::pow after conversion.
    template <typename T, enable_if_t<std::is_floating_point<T>::value, int> = 0>
    static T pow_impl(const T &base, const mp_integer &exp)
    {
        return std::pow(base, static_cast<T>(exp));
    }
4387 
4388 public:
4390 
    /// Generic exponentiation: returns base**exp. The return type is the
    /// common type of T and U (mp_integer for integral combinations, the
    /// floating-point type when one operand is floating-point).
    template <typename T, typename U>
    friend common_t<T, U> pow(const T &base, const U &exp)
    {
        return pow_impl(base, exp);
    }
4415 
4421  {
4422  if (is_static()) {
4423  if (m_int.g_st()._mp_size < 0) {
4424  m_int.g_st()._mp_size = -m_int.g_st()._mp_size;
4425  }
4426  } else {
4427  ::mpz_abs(&m_int.g_dy(), &m_int.g_dy());
4428  }
4429  return *this;
4430  }
4432 
4438  friend void abs(mp_integer &rop, const mp_integer &n)
4439  {
4440  rop = n;
4441  rop.abs();
4442  }
4444 
4449  friend mp_integer abs(const mp_integer &n)
4450  {
4451  mp_integer ret(n);
4452  ret.abs();
4453  return ret;
4454  }
4456 
4463  friend std::size_t hash(const mp_integer &n)
4464  {
4465  std::size_t asize;
4466  // NOTE: size is part of the common initial sequence.
4467  const mpz_size_t size = n.m_int.m_st._mp_size;
4468  const ::mp_limb_t *ptr;
4469  if (n.m_int.is_static()) {
4470  asize = static_cast<std::size_t>((size >= 0) ? size : -size);
4471  ptr = n.m_int.g_st().m_limbs.data();
4472  } else {
4473  asize = ::mpz_size(&n.m_int.g_dy());
4474  ptr = n.m_int.g_dy()._mp_d;
4475  }
4476  // Init the retval as the signed size.
4477  auto retval = static_cast<std::size_t>(size);
4478  // Combine the limbs.
4479  for (std::size_t i = 0; i < asize; ++i) {
4480  // The hash combiner. This is lifted directly from Boost. See also:
4481  // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2014/n3876.pdf
4482  retval ^= (ptr[i] & GMP_NUMB_MASK) + std::size_t(0x9e3779b9) + (retval << 6) + (retval >> 2);
4483  }
4484  return retval;
4485  }
4486 
4487 private:
4488  static void nextprime_impl(mp_integer &rop, const mp_integer &n)
4489  {
4490  if (rop.is_static()) {
4491  MPPP_MAYBE_TLS mpz_raii tmp;
4492  ::mpz_nextprime(&tmp.m_mpz, n.get_mpz_view());
4493  rop = mp_integer(&tmp.m_mpz);
4494  } else {
4495  ::mpz_nextprime(&rop.m_int.g_dy(), n.get_mpz_view());
4496  }
4497  }
4498 
4499 public:
4501 
    /// Ternary nextprime: rop = first prime greater than n.
    friend void nextprime(mp_integer &rop, const mp_integer &n)
    {
        // NOTE: nextprime on negative numbers always returns 2.
        nextprime_impl(rop, n);
    }
4514 
4520  {
4521  mp_integer retval;
4522  nextprime_impl(retval, n);
4523  return retval;
4524  }
4526 
4532  {
4533  nextprime_impl(*this, *this);
4534  return *this;
4535  }
4537 
    /// Primality test via mpz_probab_prime_p(): returns 2 if *this is definitely
    /// prime, 1 if probably prime, 0 if definitely composite.
    /// Throws std::invalid_argument if reps < 1 or if *this is negative.
    int probab_prime_p(int reps = 25) const
    {
        if (mppp_unlikely(reps < 1)) {
            throw std::invalid_argument("The number of primality tests must be at least 1, but a value of "
                                        + std::to_string(reps) + " was provided instead");
        }
        if (mppp_unlikely(sgn() < 0)) {
            throw std::invalid_argument("Cannot run primality tests on the negative number " + to_string());
        }
        return ::mpz_probab_prime_p(get_mpz_view(), reps);
    }
4560 
    /// Free-function primality test, equivalent to n.probab_prime_p(reps).
    friend int probab_prime_p(const mp_integer &n, int reps = 25)
    {
        return n.probab_prime_p(reps);
    }
4574 
4575 private:
    // Implementation of the integer square root: rop = floor(sqrt(n)).
    // Throws std::domain_error if n is negative.
    static void sqrt_impl(mp_integer &rop, const mp_integer &n)
    {
        using mppp_impl::copy_limbs_no;
        // NOTE: reading the size member is valid for both storage types.
        if (mppp_unlikely(n.m_int.m_st._mp_size < 0)) {
            throw std::domain_error("Cannot compute the square root of the negative number " + n.to_string());
        }
        const bool sr = rop.is_static(), sn = n.is_static();
        if (mppp_likely(sr && sn)) {
            s_int &rs = rop.m_int.g_st();
            const s_int &ns = n.m_int.g_st();
            // NOTE: we know this is not negative, from the check above.
            const mpz_size_t size = ns._mp_size;
            if (!size) {
                // Special casing for zero.
                rs._mp_size = 0;
                rs.zero_unused_limbs();
                return;
            }
            // In case of overlap we need to go through a tmp variable.
            std::array<::mp_limb_t, SSize> tmp;
            const bool overlap = (&rs == &ns);
            auto out_ptr = overlap ? tmp.data() : rs.m_limbs.data();
            // NOTE: null remainder pointer: only the root is needed.
            ::mpn_sqrtrem(out_ptr, nullptr, ns.m_limbs.data(), static_cast<::mp_size_t>(size));
            // Compute the size of the output (which is ceil(size / 2)).
            const mpz_size_t new_size = size / 2 + size % 2;
            // The top limb of the root must be nonzero for a nonzero operand.
            assert(!new_size || (out_ptr[new_size - 1] & GMP_NUMB_MASK));
            // Write out the result.
            rs._mp_size = new_size;
            if (overlap) {
                copy_limbs_no(out_ptr, out_ptr + new_size, rs.m_limbs.data());
            }
            // Clear out unused limbs.
            rs.zero_unused_limbs();
            return;
        }
        if (sr) {
            rop.promote();
        }
        ::mpz_sqrt(&rop.m_int.g_dy(), n.get_mpz_view());
    }
4616 
4617 public:
4619 
4627  {
4628  sqrt_impl(*this, *this);
4629  return *this;
4630  }
4632 
    /// Ternary integer square root: rop = floor(sqrt(n)).
    /// Throws std::domain_error if n is negative.
    friend void sqrt(mp_integer &rop, const mp_integer &n)
    {
        sqrt_impl(rop, n);
    }
4645 
4652  friend mp_integer sqrt(const mp_integer &n)
4653  {
4654  mp_integer retval;
4655  sqrt_impl(retval, n);
4656  return retval;
4657  }
4659 
4662  bool odd_p() const
4663  {
4664  if (is_static()) {
4665  // NOTE: as usual we assume that a zero static integer has all limbs set to zero.
4666  return (m_int.g_st().m_limbs[0] & GMP_NUMB_MASK) & ::mp_limb_t(1);
4667  }
4668  return mpz_odd_p(&m_int.g_dy());
4669  }
4671 
4676  friend bool odd_p(const mp_integer &n)
4677  {
4678  return n.odd_p();
4679  }
4681 
4684  bool even_p() const
4685  {
4686  return !odd_p();
4687  }
4689 
4694  friend bool even_p(const mp_integer &n)
4695  {
4696  return n.even_p();
4697  }
4699 
4707  friend void fac_ui(mp_integer &rop, unsigned long n)
4708  {
4709  // NOTE: we put a limit here because the GMP function just crashes and burns
4710  // if n is too large, and n does not even need to be that large.
4711  constexpr auto max_fac = 1000000ull;
4712  if (mppp_unlikely(n > max_fac)) {
4713  throw std::invalid_argument(
4714  "The value " + std::to_string(n)
4715  + " is too large to be used as input for the factorial function (the maximum allowed value is "
4716  + std::to_string(max_fac) + ")");
4717  }
4718  if (rop.is_static()) {
4719  MPPP_MAYBE_TLS mpz_raii tmp;
4720  ::mpz_fac_ui(&tmp.m_mpz, n);
4721  rop = mp_integer(&tmp.m_mpz);
4722  } else {
4723  ::mpz_fac_ui(&rop.m_int.g_dy(), n);
4724  }
4725  }
4727 
4735  friend void bin_ui(mp_integer &rop, const mp_integer &n, unsigned long k)
4736  {
4737  if (rop.is_static()) {
4738  MPPP_MAYBE_TLS mpz_raii tmp;
4739  ::mpz_bin_ui(&tmp.m_mpz, n.get_mpz_view(), k);
4740  rop = mp_integer(&tmp.m_mpz);
4741  } else {
4742  ::mpz_bin_ui(&rop.m_int.g_dy(), n.get_mpz_view(), k);
4743  }
4744  }
4746 
4752  friend mp_integer bin_ui(const mp_integer &n, unsigned long k)
4753  {
4754  mp_integer retval;
4755  bin_ui(retval, n, k);
4756  return retval;
4757  }
4758 
4759 private:
    // binomial() is enabled for (mp_integer, mp_integer) and for mp_integer
    // paired with a supported integral type, in either order.
    template <typename T, typename U>
    using binomial_enabler_impl = std::
        integral_constant<bool, disjunction<conjunction<std::is_same<mp_integer, T>, std::is_same<mp_integer, U>>,
                                            conjunction<std::is_same<mp_integer, T>, is_supported_integral<U>>,
                                            conjunction<std::is_same<mp_integer, U>, is_supported_integral<T>>>::value>;
#if defined(_MSC_VER)
    // MSVC workaround: the enabler doubles as the return type.
    template <typename T, typename U>
    using binomial_enabler = enable_if_t<binomial_enabler_impl<T, U>::value, mp_integer>;
#else
    template <typename T, typename U>
    using binomial_enabler = enable_if_t<binomial_enabler_impl<T, U>::value, int>;
#endif
    // Implementation of binomial(n, k) for a generic (possibly negative) k.
    template <typename T>
    static mp_integer binomial_impl(const mp_integer &n, const T &k)
    {
        // NOTE: here we re-use some helper methods used in the implementation of pow().
        if (exp_nonnegative(k)) {
            return bin_ui(n, exp_to_ulong(k));
        }
        // This is the case k < 0, handled according to:
        // http://arxiv.org/abs/1105.3689/
        if (n.sgn() >= 0) {
            // n >= 0, k < 0: the binomial is zero.
            return mp_integer{};
        }
        // n < 0, k < 0.
        if (k <= n) {
            // The formula is: (-1)**(n-k) * binomial(-k-1,n-k).
            // Cache n-k.
            const mp_integer nmk{n - k};
            // Build -k-1 via increment + negation.
            mp_integer tmp{k};
            ++tmp;
            tmp.neg();
            auto retval = bin_ui(tmp, exp_to_ulong(nmk));
            if (nmk.odd_p()) {
                retval.neg();
            }
            return retval;
        }
        // n < k < 0: the binomial is zero.
        return mp_integer{};
    }
    // C++ integral -- mp_integer overload: promote n and delegate.
    template <typename T, enable_if_t<std::is_integral<T>::value, int> = 0>
    static mp_integer binomial_impl(const T &n, const mp_integer &k)
    {
        return binomial_impl(mp_integer{n}, k);
    }
4806 
4807 public:
#if defined(_MSC_VER)
    // MSVC workaround: SFINAE via the return type.
    template <typename T, typename U>
    friend binomial_enabler<T, U> binomial(const T &n, const U &k)
#else
    /// Generic binomial coefficient, enabled for mp_integer/integral combinations
    /// (see binomial_enabler). Supports negative arguments.
    template <typename T, typename U, binomial_enabler<T, U> = 0>
    friend mp_integer binomial(const T &n, const U &k)
#endif
    {
        return binomial_impl(n, k);
    }
4838 
4839 private:
    // Exact division.
    // mpn implementation.
    static void static_divexact_impl(s_int &q, const s_int &op1, const s_int &op2, mpz_size_t asize1, mpz_size_t asize2,
                                     int sign1, int sign2, const std::integral_constant<int, 0> &)
    {
        using mppp_impl::copy_limbs_no;
        if (asize1 == 0) {
            // Special casing if the numerator is zero (the mpn functions do not work with zero operands).
            q._mp_size = 0;
            return;
        }
#if __GNU_MP_VERSION > 6 || (__GNU_MP_VERSION == 6 && __GNU_MP_VERSION_MINOR >= 1)
        // NOTE: mpn_divexact_1() is available since GMP 6.1.0.
        if (asize2 == 1) {
            // Optimisation in case the divisor has only 1 limb.
            // NOTE: overlapping arguments are fine here.
            ::mpn_divexact_1(q.m_limbs.data(), op1.m_limbs.data(), static_cast<::mp_size_t>(asize1), op2.m_limbs[0]);
            // Complete the quotient: compute size and sign.
            q._mp_size = asize1 - asize2 + 1;
            // Normalise the size by stripping leading zero limbs.
            while (q._mp_size && !(q.m_limbs[static_cast<std::size_t>(q._mp_size - 1)] & GMP_NUMB_MASK)) {
                --q._mp_size;
            }
            if (sign1 != sign2) {
                q._mp_size = -q._mp_size;
            }
            return;
        }
#else
        // Avoid compiler warnings for unused parameters.
        (void)sign1;
        (void)sign2;
        (void)asize2;
#endif
        // General implementation (via the mpz function).
        MPPP_MAYBE_TLS mpz_raii tmp;
        ::mpz_divexact(&tmp.m_mpz, op1.get_mpz_view(), op2.get_mpz_view());
        // Copy over from the tmp struct into q.
        q._mp_size = tmp.m_mpz._mp_size;
        copy_limbs_no(tmp.m_mpz._mp_d, tmp.m_mpz._mp_d + (q._mp_size >= 0 ? q._mp_size : -q._mp_size),
                      q.m_limbs.data());
    }
4881  // 1-limb optimisation.
4882  static void static_divexact_impl(s_int &q, const s_int &op1, const s_int &op2, mpz_size_t, mpz_size_t, int sign1,
4883  int sign2, const std::integral_constant<int, 1> &)
4884  {
4885  const ::mp_limb_t q_ = (op1.m_limbs[0] & GMP_NUMB_MASK) / (op2.m_limbs[0] & GMP_NUMB_MASK);
4886  // Write q.
4887  q._mp_size = (q_ != 0u);
4888  if (sign1 != sign2) {
4889  q._mp_size = -q._mp_size;
4890  }
4891  q.m_limbs[0] = q_;
4892  }
    // 2-limbs optimisation.
    static void static_divexact_impl(s_int &q, const s_int &op1, const s_int &op2, mpz_size_t asize1, mpz_size_t asize2,
                                     int sign1, int sign2, const std::integral_constant<int, 2> &)
    {
        if (asize1 < 2 && asize2 < 2) {
            // 1-limb optimisation: the division is exact, so plain limb
            // division yields the quotient.
            const ::mp_limb_t q_ = op1.m_limbs[0] / op2.m_limbs[0];
            q._mp_size = (q_ != 0u);
            if (sign1 != sign2) {
                q._mp_size = -q._mp_size;
            }
            q.m_limbs[0] = q_;
            q.m_limbs[1] = 0u;
            return;
        }
        // General case: double-limb division helper (defined elsewhere in this file).
        ::mp_limb_t q1, q2;
        dlimb_div(op1.m_limbs[0], op1.m_limbs[1], op2.m_limbs[0], op2.m_limbs[1], &q1, &q2);
        // Derive the size from the quotient limbs; negate when the signs differ.
        q._mp_size = q2 ? 2 : (q1 ? 1 : 0);
        if (sign1 != sign2) {
            q._mp_size = -q._mp_size;
        }
        q.m_limbs[0] = q1;
        q.m_limbs[1] = q2;
    }
4918  static void static_divexact(s_int &q, const s_int &op1, const s_int &op2)
4919  {
4920  mpz_size_t asize1 = op1._mp_size, asize2 = op2._mp_size;
4921  int sign1 = asize1 != 0, sign2 = asize2 != 0;
4922  if (asize1 < 0) {
4923  asize1 = -asize1;
4924  sign1 = -1;
4925  }
4926  if (asize2 < 0) {
4927  asize2 = -asize2;
4928  sign2 = -1;
4929  }
4930  assert(asize1 == 0 || asize2 <= asize1);
4931  // NOTE: use static_div_algo for the algorithm selection.
4932  static_divexact_impl(q, op1, op2, asize1, asize2, sign1, sign2, static_div_algo<s_int>{});
4933  if (static_div_algo<s_int>::value == 0) {
4934  q.zero_unused_limbs();
4935  }
4936  }
4937 
4938 public:
4940 
4949  friend void divexact(mp_integer &rop, const mp_integer &n, const mp_integer &d)
4950  {
4951  const bool sr = rop.is_static(), s1 = n.is_static(), s2 = d.is_static();
4952  if (mppp_likely(sr && s1 && s2)) {
4953  static_divexact(rop.m_int.g_st(), n.m_int.g_st(), d.m_int.g_st());
4954  // Division can never fail.
4955  return;
4956  }
4957  if (sr) {
4958  rop.m_int.promote();
4959  }
4960  ::mpz_divexact(&rop.m_int.g_dy(), n.get_mpz_view(), d.get_mpz_view());
4961  }
4963 
4971  friend mp_integer divexact(const mp_integer &n, const mp_integer &d)
4972  {
4973  mp_integer retval;
4974  divexact(retval, n, d);
4975  return retval;
4976  }
4977 
4978 private:
    // Static GCD: rop = gcd(|op1|, |op2|); the result is always nonnegative.
    static void static_gcd(s_int &rop, const s_int &op1, const s_int &op2)
    {
        using mppp_impl::copy_limbs_no;
        mpz_size_t asize1 = op1._mp_size, asize2 = op2._mp_size;
        if (asize1 < 0) {
            asize1 = -asize1;
        }
        if (asize2 < 0) {
            asize2 = -asize2;
        }
        // Handle zeroes: gcd(0, x) == |x|.
        if (!asize1) {
            rop._mp_size = asize2;
            rop.m_limbs = op2.m_limbs;
            return;
        }
        if (!asize2) {
            rop._mp_size = asize1;
            rop.m_limbs = op1.m_limbs;
            return;
        }
        // Special casing if an operand has asize 1: mpn_gcd_1() handles it directly.
        if (asize1 == 1) {
            rop._mp_size = 1;
            rop.m_limbs[0] = ::mpn_gcd_1(op2.m_limbs.data(), static_cast<::mp_size_t>(asize2), op1.m_limbs[0]);
            return;
        }
        if (asize2 == 1) {
            rop._mp_size = 1;
            rop.m_limbs[0] = ::mpn_gcd_1(op1.m_limbs.data(), static_cast<::mp_size_t>(asize1), op2.m_limbs[0]);
            return;
        }
        // General case, via mpz.
        // NOTE: there is an mpn_gcd() function, but it seems difficult to use. Apparently, and contrary to
        // what stated in the latest documentation, the mpn function requires odd operands and bit size
        // (not only limb size!) of the second operand not greater than the first. See for instance the old
        // documentation:
        // ftp://ftp.gnu.org/old-gnu/Manuals/gmp-3.1.1/html_chapter/gmp_9.html
        // Indeed, compiling GMP in debug mode and then trying to use the mpn function without respecting the above
        // results in assertion failures. For now let's keep it like this, the small operand cases are handled above
        // (partially) via mpn_gcd_1(), and in the future we can also think about binary GCD for 1/2 limbs optimisation.
        MPPP_MAYBE_TLS mpz_raii tmp;
        ::mpz_gcd(&tmp.m_mpz, op1.get_mpz_view(), op2.get_mpz_view());
        // Copy over.
        rop._mp_size = tmp.m_mpz._mp_size;
        // Both operands are nonzero at this point, hence the GCD is positive.
        assert(rop._mp_size > 0);
        copy_limbs_no(tmp.m_mpz._mp_d, tmp.m_mpz._mp_d + rop._mp_size, rop.m_limbs.data());
    }
5027 
5028 public:
5030 
5038  friend void gcd(mp_integer &rop, const mp_integer &op1, const mp_integer &op2)
5039  {
5040  const bool sr = rop.is_static(), s1 = op1.is_static(), s2 = op2.is_static();
5041  if (mppp_likely(sr && s1 && s2)) {
5042  static_gcd(rop.m_int.g_st(), op1.m_int.g_st(), op2.m_int.g_st());
5043  rop.m_int.g_st().zero_unused_limbs();
5044  return;
5045  }
5046  if (sr) {
5047  rop.m_int.promote();
5048  }
5049  ::mpz_gcd(&rop.m_int.g_dy(), op1.get_mpz_view(), op2.get_mpz_view());
5050  }
5052 
5058  friend mp_integer gcd(const mp_integer &op1, const mp_integer &op2)
5059  {
5060  mp_integer retval;
5061  gcd(retval, op1, op2);
5062  return retval;
5063  }
5065 
    /// Return a mutable reference to the internal integer union (low-level interface).
    mppp_impl::integer_union<SSize> &_get_union()
    {
        return m_int;
    }
    /// Return a const reference to the internal integer union (low-level interface).
    const mppp_impl::integer_union<SSize> &_get_union() const
    {
        return m_int;
    }
5085 
    /// Get a pointer to the dynamic storage, for direct interfacing with the GMP API.
    /// NOTE: *this is unconditionally promoted to dynamic storage by this call.
    std::remove_extent<::mpz_t>::type *get_mpz_t()
    {
        promote();
        return &m_int.g_dy();
    }
5101 
    /// Test if the value is zero.
    bool is_zero() const
    {
        // NOTE: the size member is part of the common initial sequence,
        // readable for both static and dynamic storage.
        return m_int.m_st._mp_size == 0;
    }
    /// Free-function zero test.
    friend bool is_zero(const mp_integer &n)
    {
        return n.is_zero();
    }
5118 
5119 private:
    // Implementation of is_one()/is_negative_one(): One is +1 or -1.
    template <int One>
    bool is_one_impl() const
    {
        // A value of +-1 has signed size exactly +-1.
        if (m_int.m_st._mp_size != One) {
            return false;
        }
        // Get the pointer to the limbs.
        const ::mp_limb_t *ptr = is_static() ? m_int.g_st().m_limbs.data() : m_int.g_dy()._mp_d;
        // The single-limb magnitude must be exactly 1 (mask away nail bits).
        return (ptr[0] & GMP_NUMB_MASK) == 1u;
    }
5131 
5132 public:
5134 
    /// Test if the value is exactly one.
    bool is_one() const
    {
        return is_one_impl<1>();
    }
    /// Free-function test for one.
    friend bool is_one(const mp_integer &n)
    {
        return n.is_one();
    }
    /// Test if the value is exactly minus one.
    bool is_negative_one() const
    {
        return is_one_impl<-1>();
    }
    /// Free-function test for minus one.
    friend bool is_negative_one(const mp_integer &n)
    {
        return n.is_negative_one();
    }
5169 
5170 private:
5171  mppp_impl::integer_union<SSize> m_int;
5172 };
5173 
// Out-of-class definition of the static constexpr member (required pre-C++17
// when the member is odr-used).
template <std::size_t SSize>
constexpr std::size_t mp_integer<SSize>::ssize;
5176 
5177 namespace mppp_impl
5178 {
5179 
// A small wrapper to avoid name clashing below, in the specialisation of std::hash.
template <size_t SSize>
inline std::size_t hash_wrapper(const mp_integer<SSize> &n)
{
    // Picks up the friend hash() of mp_integer via ADL.
    return hash(n);
}
5186 }
5187 }
5188 
5189 namespace std
5190 {
5191 
5193 template <size_t SSize>
5194 struct hash<MPPP_NAMESPACE::mp_integer<SSize>> {
5196  typedef MPPP_NAMESPACE::mp_integer<SSize> argument_type;
5198  typedef size_t result_type;
5200 
5206  {
5207  return MPPP_NAMESPACE::mppp_impl::hash_wrapper(n);
5208  }
5209 };
5210 }
5211 
5212 #if defined(_MSC_VER)
5213 
5214 #pragma warning(pop)
5215 
5216 #endif
5217 
5218 #endif
mp_integer(const char *s, int base=10)
Constructor from C string.
Definition: mp++.hpp:1205
bool demote()
Demote to static storage.
Definition: mp++.hpp:1581
friend void sub(mp_integer &rop, const mp_integer &op1, const mp_integer &op2)
Ternary subtraction.
Definition: mp++.hpp:2215
Multiprecision integer class.
Definition: mp++.hpp:869
friend T & operator+=(T &x, const mp_integer &n)
In-place addition for interoperable types.
Definition: mp++.hpp:2176
friend mp_integer neg(const mp_integer &n)
Unary negation.
Definition: mp++.hpp:1689
const mppp_impl::integer_union< SSize > & _get_union() const
Return a const reference to the internal union.
Definition: mp++.hpp:5080
friend bool operator>=(const T &op1, const U &op2)
Greater-than or equal operator.
Definition: mp++.hpp:4211
bool is_zero() const
Test if the value is zero.
Definition: mp++.hpp:5104
friend int sgn(const mp_integer &n)
Sign function.
Definition: mp++.hpp:1626
mp_integer & operator++()
Prefix increment.
Definition: mp++.hpp:2190
friend mp_integer bin_ui(const mp_integer &n, unsigned long k)
Binomial coefficient (binary version).
Definition: mp++.hpp:4752
mp_integer(const ::mpz_t n)
Constructor from mpz_t.
Definition: mp++.hpp:1244
std::remove_extent<::mpz_t >::type * get_mpz_t()
Get a pointer to the dynamic storage.
Definition: mp++.hpp:5095
friend void divexact(mp_integer &rop, const mp_integer &n, const mp_integer &d)
Exact division (ternary version).
Definition: mp++.hpp:4949
friend mp_integer operator<<(const mp_integer &n, T s)
Left shift operator.
Definition: mp++.hpp:3741
friend bool operator<(const T &op1, const U &op2)
Less-than operator.
Definition: mp++.hpp:4194
friend void gcd(mp_integer &rop, const mp_integer &op1, const mp_integer &op2)
GCD (ternary version).
Definition: mp++.hpp:5038
mp_integer & operator=(const T &x)
Generic assignment operator.
Definition: mp++.hpp:1284
mp_integer & operator+=(const T &op)
In-place addition operator.
Definition: mp++.hpp:2137
friend bool odd_p(const mp_integer &n)
Test if integer is odd.
Definition: mp++.hpp:4676
friend mp_integer nextprime(const mp_integer &n)
Compute next prime number (unary version).
Definition: mp++.hpp:4519
mp_integer & abs()
In-place absolute value.
Definition: mp++.hpp:4420
friend void bin_ui(mp_integer &rop, const mp_integer &n, unsigned long k)
Binomial coefficient (ternary version).
Definition: mp++.hpp:4735
std::string to_string(int base=10) const
Conversion to string.
Definition: mp++.hpp:1350
STL namespace.
friend mp_integer divexact(const mp_integer &n, const mp_integer &d)
Exact division (binary version).
Definition: mp++.hpp:4971
friend void fac_ui(mp_integer &rop, unsigned long n)
Factorial.
Definition: mp++.hpp:4707
friend common_t< T, U > operator-(const T &op1, const U &op2)
Binary subtraction operator.
Definition: mp++.hpp:2247
mppp_impl::integer_union< SSize > & _get_union()
Return a reference to the internal union.
Definition: mp++.hpp:5070
friend void addmul(mp_integer &rop, const mp_integer &op1, const mp_integer &op2)
Ternary multiply–accumulate.
Definition: mp++.hpp:3066
friend T & operator*=(T &x, const mp_integer &n)
In-place multiplication for interoperable types.
Definition: mp++.hpp:2842
friend common_t< T, U > operator*(const T &op1, const U &op2)
Binary multiplication operator.
Definition: mp++.hpp:2800
bool promote()
Promote to dynamic storage.
Definition: mp++.hpp:1566
friend std::istream & operator>>(std::istream &is, mp_integer &n)
Input stream operator for mp_integer.
Definition: mp++.hpp:1331
friend T & operator-=(T &x, const mp_integer &n)
In-place subtraction for interoperable types.
Definition: mp++.hpp:2289
friend bool even_p(const mp_integer &n)
Test if integer is even.
Definition: mp++.hpp:4694
friend void abs(mp_integer &rop, const mp_integer &n)
Binary absolute value.
Definition: mp++.hpp:4438
friend void nextprime(mp_integer &rop, const mp_integer &n)
Compute next prime number (binary version).
Definition: mp++.hpp:4508
bool is_negative_one() const
Test if the value is equal to minus one.
Definition: mp++.hpp:5155
mp_integer & operator>>=(T s)
In-place right shift operator.
Definition: mp++.hpp:3952
friend void neg(mp_integer &rop, const mp_integer &n)
Binary negation.
Definition: mp++.hpp:1678
friend int cmp(const mp_integer &op1, const mp_integer &op2)
Comparison function for mp_integer.
Definition: mp++.hpp:4140
bool odd_p() const
Test if value is odd.
Definition: mp++.hpp:4662
friend bool operator<=(const T &op1, const U &op2)
Less-than or equal operator.
Definition: mp++.hpp:4245
mp_integer & nextprime()
Compute next prime number (in-place version).
Definition: mp++.hpp:4531
mpz_view get_mpz_view() const
Get an mpz_t view.
Definition: mp++.hpp:1652
mp_integer & neg()
Negate in-place.
Definition: mp++.hpp:1662
mp_integer & operator--()
Prefix decrement.
Definition: mp++.hpp:2300
friend common_t< T, U > pow(const T &base, const U &exp)
Generic binary exponentiation.
Definition: mp++.hpp:4410
friend common_mod_t< T, U > operator%(const T &n, const U &d)
Binary modulo operator.
Definition: mp++.hpp:3497
size_t result_type
The result type.
Definition: mp++.hpp:5198
friend bool operator>(const T &op1, const U &op2)
Greater-than operator.
Definition: mp++.hpp:4228
std::size_t size() const
Size in limbs.
Definition: mp++.hpp:1600
std::size_t nbits() const
Size in bits.
Definition: mp++.hpp:1592
mp_integer & operator/=(const T &d)
In-place division operator.
Definition: mp++.hpp:3453
result_type operator()(const argument_type &n) const
Call operator.
Definition: mp++.hpp:5205
mp_integer operator+() const
Identity operator.
Definition: mp++.hpp:2111
int probab_prime_p(int reps=25) const
Test primality.
Definition: mp++.hpp:4548
math_binomial_type< T, U > binomial(const T &x, const U &y)
Generalised binomial coefficient.
Definition: binomial.hpp:215
bool is_static() const
Test for static storage.
Definition: mp++.hpp:1292
bool is_one() const
Test if the value is equal to one.
Definition: mp++.hpp:5137
Exception to signal division by zero.
Definition: mp++.hpp:705
friend common_t< T, U > operator/(const T &n, const U &d)
Binary division operator.
Definition: mp++.hpp:3438
friend std::size_t hash(const mp_integer &n)
Hash value.
Definition: mp++.hpp:4463
friend void sqrt(mp_integer &rop, const mp_integer &n)
Integer square root (binary version).
Definition: mp++.hpp:4640
mp_integer operator-() const
Negated copy.
Definition: mp++.hpp:2233
bool even_p() const
Test if value is even.
Definition: mp++.hpp:4684
friend mp_integer operator>>(const mp_integer &n, T s)
Right shift operator.
Definition: mp++.hpp:3932
friend mp_integer gcd(const mp_integer &op1, const mp_integer &op2)
GCD (binary version).
Definition: mp++.hpp:5058
mppp::mp_integer< SSize > argument_type
The argument type.
Definition: mp++.hpp:5196
friend bool is_one(const mp_integer &n)
Test if an mppp::mp_integer is equal to one.
Definition: mp++.hpp:5147
friend bool is_zero(const mp_integer &n)
Test if an mppp::mp_integer is zero.
Definition: mp++.hpp:5114
mp_integer operator++(int)
Suffix increment.
Definition: mp++.hpp:2201
friend void tdiv_qr(mp_integer &q, mp_integer &r, const mp_integer &n, const mp_integer &d)
Ternary truncated division.
Definition: mp++.hpp:3405
friend mp_integer abs(const mp_integer &n)
Unary absolute value.
Definition: mp++.hpp:4449
friend void add_ui(mp_integer &rop, const mp_integer &op1, unsigned long op2)
Ternary add with unsigned long.
Definition: mp++.hpp:2492
friend mp_integer pow_ui(const mp_integer &base, unsigned long exp)
Binary exponentiation.
Definition: mp++.hpp:4275
friend int probab_prime_p(const mp_integer &n, int reps=25)
Test primality.
Definition: mp++.hpp:4570
mp_integer & operator%=(const T &d)
In-place modulo operator.
Definition: mp++.hpp:3510
friend void mul(mp_integer &rop, const mp_integer &op1, const mp_integer &op2)
Ternary multiplication.
Definition: mp++.hpp:2772
friend void add(mp_integer &rop, const mp_integer &op1, const mp_integer &op2)
Ternary add.
Definition: mp++.hpp:2093
friend bool operator==(const T &op1, const U &op2)
Equality operator.
Definition: mp++.hpp:4160
mppp::mp_integer< SSize > mp_integer
Alias for mppp::mp_integer.
Definition: mp_integer.hpp:60
mp_integer(T x)
Generic constructor.
Definition: mp++.hpp:1186
mp_integer operator--(int)
Suffix decrement.
Definition: mp++.hpp:2314
mp_integer & sqrt()
Integer square root (in-place version).
Definition: mp++.hpp:4626
friend mp_integer sqrt(const mp_integer &n)
Integer square root (unary version).
Definition: mp++.hpp:4652
mp_integer(const std::string &s, int base=10)
Constructor from C++ string (equivalent to the constructor from C string).
Definition: mp++.hpp:1231
friend void pow_ui(mp_integer &rop, const mp_integer &base, unsigned long exp)
Ternary exponentiation.
Definition: mp++.hpp:4258
int sgn() const
Sign.
Definition: mp++.hpp:1611
friend T & operator/=(T &x, const mp_integer &n)
In-place division for interoperable types.
Definition: mp++.hpp:3482
mp_integer & operator*=(const T &op)
In-place multiplication operator.
Definition: mp++.hpp:2814
friend bool operator!=(const T &op1, const U &op2)
Inequality operator.
Definition: mp++.hpp:4177
friend mp_integer binomial(const T &n, const U &k)
Generic binomial coefficient.
Definition: mp++.hpp:4833
friend bool is_negative_one(const mp_integer &n)
Test if an mppp::mp_integer is equal to minus one.
Definition: mp++.hpp:5165
friend common_t< T, U > operator+(const T &op1, const U &op2)
Binary addition operator.
Definition: mp++.hpp:2123
mp_integer & operator<<=(T s)
In-place left shift operator.
Definition: mp++.hpp:3761
friend std::ostream & operator<<(std::ostream &os, const mp_integer &n)
Output stream operator for mp_integer.
Definition: mp++.hpp:1316
mp_integer & operator-=(const T &op)
In-place subtraction operator.
Definition: mp++.hpp:2261
bool is_dynamic() const
Check for dynamic storage.
Definition: mp++.hpp:1300
friend void mul_2exp(mp_integer &rop, const mp_integer &n, ::mp_bitcnt_t s)
Ternary left shift.
Definition: mp++.hpp:3712
friend void tdiv_q_2exp(mp_integer &rop, const mp_integer &n, ::mp_bitcnt_t s)
Ternary right shift.
Definition: mp++.hpp:3906