/* mpn_divisible_p -- mpn by mpn divisibility test

   THE FUNCTIONS IN THIS FILE ARE FOR INTERNAL USE ONLY.  THEY'RE ALMOST
   CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES OR DISAPPEAR COMPLETELY IN
   FUTURE GNU MP RELEASES.

Copyright 2001, 2002, 2005, 2009 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

/* Determine whether {ap,an} is divisible by {dp,dn}.  Must have both
   operands normalized, meaning high limbs non-zero, except that an==0 is
   allowed.

   There usually won't be many low zero bits on d, but the checks for this
   are fast and might pick up a few operand combinations, in particular they
   might reduce d to fit the single-limb mod_1/modexact_1 code.

   Future:

   Getting the remainder limb by limb would make an early exit possible on
   finding a non-zero.  This would probably have to be bdivmod style so
   there's no addback, but it would need a multi-precision inverse and so
   might be slower than the plain method (on small sizes at least).

   When d must be normalized (shifted to high bit set), it's possible to
   just append a low zero limb to "a" rather than bit-shifting as
   mpn_tdiv_qr does internally, so long as it's already been checked that a
   has at least as many trailing zero bits as d.  Or equivalently, pass
   qxn==1 to mpn_tdiv_qr, if/when it accepts that.  */
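
/* Usage sketch (illustrative, not part of the original source): callers must
   pass operands already normalized, i.e. with a non-zero high limb (an==0 is
   the one exception).  Within the library this routine is reached through
   mpz_divisible_p, roughly along these lines, where PTR and ABSIZ are the
   gmp-impl.h accessors for an mpz_t's limb pointer and absolute size:

       mp_srcptr ap = PTR (a), dp = PTR (d);
       mp_size_t an = ABSIZ (a), dn = ABSIZ (d);

       ... the d == 0 case is dealt with separately by the caller ...
       return mpn_divisible_p (ap, an, dp, dn);
*/
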
int
mpn_divisible_p (mp_srcptr ap, mp_size_t an,
                 mp_srcptr dp, mp_size_t dn)
{
  mp_limb_t  alow, dlow, dmask;
  mp_ptr     qp, rp, tp;
  mp_size_t  i;
  mp_limb_t  di;
  unsigned   twos;
  TMP_DECL;

  ASSERT (an >= 0);
  ASSERT (an == 0 || ap[an-1] != 0);
  ASSERT (dn >= 1);
  ASSERT (dp[dn-1] != 0);
  ASSERT_MPN (ap, an);
  ASSERT_MPN (dp, dn);

  /* When a<d only a==0 is divisible.
     Notice this test covers all cases of an==0. */
  if (an < dn)
    return (an == 0);

  /* Strip low zero limbs from d, requiring a==0 on those. */
  for (;;)
    {
      alow = *ap;
      dlow = *dp;

      if (dlow != 0)
        break;

      if (alow != 0)
        return 0;  /* a has fewer low zero limbs than d, so not divisible */

      /* a!=0 and d!=0 so won't get to n==0 */
      an--; ASSERT (an >= 1);
      dn--; ASSERT (dn >= 1);
      ap++;
      dp++;
    }
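
  /* Illustration (added comment, not in the original source): LOW_ZEROS_MASK
     (dlow) is a mask covering exactly the trailing zero bits of dlow,
     conceptually ((dlow & -dlow) - 1).  E.g. dlow = ...11000 gives
     dmask = ...00111; any of those bits set in alow means a has fewer
     trailing zero bits than d, so d cannot divide a.  */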
  /* a must have at least as many low zero bits as d */
  dmask = LOW_ZEROS_MASK (dlow);
  if ((alow & dmask) != 0)
    return 0;

  if (dn == 1)
    {
      if (ABOVE_THRESHOLD (an, BMOD_1_TO_MOD_1_THRESHOLD))
        return mpn_mod_1 (ap, an, dlow) == 0;
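
      /* Added comment: mpn_modexact_1_odd needs an odd divisor.  Shifting
         out the trailing zero bits of dlow is safe here because the dmask
         test above already ensured a has at least that many low zero
         bits.  */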
      count_trailing_zeros (twos, dlow);
      dlow >>= twos;
      return mpn_modexact_1_odd (ap, an, dlow) == 0;
    }

  if (dn == 2)
    {
      mp_limb_t  dsecond = dp[1];
      if (dsecond <= dmask)
        {
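          /* Added comment: dsecond <= dmask means the non-zero part of d
             fits in a single limb once the trailing zero bits are shifted
             out, so the one-limb mod/modexact code applies.  */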
          count_trailing_zeros (twos, dlow);
          dlow = (dlow >> twos) | (dsecond << (GMP_NUMB_BITS-twos));
          ASSERT_LIMB (dlow);
          return MPN_MOD_OR_MODEXACT_1_ODD (ap, an, dlow) == 0;
        }
    }

  /* Should we compute Q = A * D^(-1) mod B^k,
                       R = A - Q * D mod B^k
     here, for some small values of k?  Then check if R = 0 (mod B^k).  */

  /* We could also compute A' = A mod T and D' = D mod P, for some
     P = 3 * 5 * 7 * 11 ..., and then check if any prime factor from P
     dividing D' also divides A'.  */

  TMP_MARK;

  rp = TMP_ALLOC_LIMBS (an + 1);
  qp = TMP_ALLOC_LIMBS (an - dn + 1);  /* FIXME: Could we avoid this */

  count_trailing_zeros (twos, dp[0]);
  if (twos != 0)
    {
      tp = TMP_ALLOC_LIMBS (dn);
      ASSERT_NOCARRY (mpn_rshift (tp, dp, dn, twos));
      dp = tp;

      ASSERT_NOCARRY (mpn_rshift (rp, ap, an, twos));
    }
  else
    {
      MPN_COPY (rp, ap, an);
    }
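
  /* Added comment: the bdiv functions below need an > dn, i.e. at least one
     quotient limb.  When the top limb of r is not below the top limb of d,
     r might reach d*B^(an-dn), so append a zero high limb to make room for
     one more quotient limb.  Otherwise, if an == dn, then 0 < r < d and d
     cannot divide a.  */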
  if (rp[an - 1] >= dp[dn - 1])
    {
      rp[an] = 0;
      an++;
    }
  else if (an == dn)
    {
      TMP_FREE;
      return 0;
    }

  ASSERT (an > dn);  /* requirement of functions below */

  if (BELOW_THRESHOLD (dn, DC_BDIV_QR_THRESHOLD) ||
      BELOW_THRESHOLD (an - dn, DC_BDIV_QR_THRESHOLD))
    {
      binvert_limb (di, dp[0]);
      mpn_sbpi1_bdiv_qr (qp, rp, an, dp, dn, -di);
      rp += an - dn;
    }
  else if (BELOW_THRESHOLD (dn, MU_BDIV_QR_THRESHOLD))
    {
      binvert_limb (di, dp[0]);
      mpn_dcpi1_bdiv_qr (qp, rp, an, dp, dn, -di);
      rp += an - dn;
    }
  else
    {
      tp = TMP_ALLOC_LIMBS (mpn_mu_bdiv_qr_itch (an, dn));
      mpn_mu_bdiv_qr (qp, rp, rp, an, dp, dn, tp);
    }
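
  /* Added comment: the three branches above are basecase, divide-and-conquer
     and larger-size variants of Hensel-style (2-adic) division by the now
     odd d, selected by the thresholds.  After the rp adjustments, the
     remainder sits in the dn limbs at rp; d divides a exactly when that
     remainder is zero, which the loop below checks.  */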

  /* test for {rp,dn} zero or non-zero */
  i = 0;
  do
    {
      if (rp[i] != 0)
        {
          TMP_FREE;
          return 0;
        }
    }
  while (++i < dn);

  TMP_FREE;
  return 1;
}