@@ -46,9 +46,12 @@ def check_normalised(x, Dst, Dmax, ERmax, rtol=1e-9):
def normalised_N_fixed(Dst, Dmax, ERmax, N, check=True):
"""Single-sided clustering on the unit interval with specified number of point."""
+ if Dmax <= Dst:
+ raise ClusteringException(f'Dmax={Dmax} should be > Dst={Dst}')
+
# Number of points needed to expand Dst to Dmax at full stretch
# Expanding for more points will give D > Dmax, which we don't want
- N1a = np.floor(np.log(Dmax / Dst) / np.log(ERmax)).astype(int)+1
+ N1a = np.ceil(np.log(Dmax / Dst) / np.log(ERmax)).astype(int)+1
# Length at which we will need to cap the spacing
L1a = Dst * (1.-ERmax**(N1a-1))/(1.-ERmax)
@@ -57,8 +60,17 @@ def normalised_N_fixed(Dst, Dmax, ERmax, N, check=True):
# Any less than this, we cannot reach
N1b = np.ceil(np.log(1.-(1.-ERmax)/Dst)/np.log(ERmax)).astype(int) + 1
+ # Number of points for uniform spacing at Dst
+ Nunif = np.ceil(1./Dst)
+
print(N1a, N1b, N, L1a)
+ if N > Nunif:
+ raise ClusteringException(
+ f'Too many points N={N} for a uniform spacing of Dst={Dst}.'
+ )
+
+
if N < N1b:
raise ClusteringException(
f'Insufficient points to reach unit length with Dst={Dst}, ERmax={ERmax} (for any Dmax)'
@@ -91,7 +103,51 @@ def normalised_N_fixed(Dst, Dmax, ERmax, N, check=True):
else:
# If we used all N points, D would exceed Dmax, so uniform needed
- raise Exception('Spacing needs to be capped')
+
+
+ # How far can we go with a linear increase in spacing?
+ N2 = N - N1a
+ dx1max = Dst * ERmax ** (N1a-2)
+ L2 = N2 * 0.5 * (dx1max + Dmax)
+ if L1a + L2 < 1.:
+ raise ClusteringException(f'Not enough points to cluster with Dmax={Dmax} cap, total length only {L1a+L2}.')
+
+ # Binary search for cap distance that gives closest to unit length
+ def _iter(Dmaxi):
+ N1ai = np.ceil(np.log(Dmaxi / Dst) / np.log(ERmax)).astype(int)+1
+ L1ai = Dst * (1.-ERmax**(N1ai-1))/(1.-ERmax)
+ dx1max = Dst * ERmax ** (N1ai-2)
+ N2i = N - N1ai
+ L2i = N2i * 0.5 * (dx1max + Dmaxi)
+ return L1ai + L2i - 1.
+
+ Dmax_high = Dmax
+ Dmax_low = Dst
+ while Dmax_high-Dmax_low > 0.01*Dmax:
+ Dmax_new = 0.5*(Dmax_high+Dmax_low)
+ err_new = _iter(Dmax_new)
+ if err_new > 0:
+ Dmax_high = Dmax_new
+ else:
+ Dmax_low = Dmax_new
+
+ def _iter(ERi):
+ N1ai = np.ceil(np.log(Dmax_high / Dst) / np.log(ERi)).astype(int)+1
+ L1ai = Dst * (1.-ERi**(N1ai-1))/(1.-ERi)
+ dx1max = Dst * ERi ** (N1ai-2)
+ N2i = N - N1ai
+ L2i = N2i * 0.5 * (dx1max + Dmax_high)
+ return L1ai + L2i - 1.
+
+ ER = root_scalar(_iter, x0=ERmax).root
+
+ N1a_cap = np.ceil(np.log(Dmax_high / Dst) / np.log(ER)).astype(int)+1
+ N2_cap = N - N1a_cap
+ dx1 = Dst * ER ** np.arange(0, N1a_cap-1)
+ dx2 = np.linspace(dx1[-1],Dmax_high, N2_cap)
+ dx = np.concatenate((dx1, dx2))
+ x = cumsum0(dx)
+
if check:
if not len(x) == N:
@@ -111,10 +167,9 @@ import matplotlib.pyplot as plt
Dst = 0.001
-Dmax = 0.1
-ER = 1.2
-
-N = 31
+Dmax = 0.08
+ER = 1.1
+N = 56
xx = normalised_N_fixed(Dst, Dmax, ER, N)