#!/usr/bin/env julia
# -*- julia -*-

# remez.jl - implementation of the Remez algorithm for polynomial approximation
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

import Base.\

# ----------------------------------------------------------------------
# Helper functions to cope with different Julia versions.
if VERSION >= v"0.7.0"
    array1d(T, d) = Array{T, 1}(undef, d)
    array2d(T, d1, d2) = Array{T, 2}(undef, d1, d2)
else
    array1d(T, d) = Array(T, d)
    array2d(T, d1, d2) = Array(T, d1, d2)
end
if VERSION < v"0.5.0"
    String = ASCIIString
end
if VERSION >= v"0.6.0"
    # Use Base.invokelatest to run functions made using eval(), to
    # avoid "world age" error
    run(f, x...) = Base.invokelatest(f, x...)
else
    # Prior to 0.6.0, invokelatest doesn't exist (but fortunately the
    # world age problem also doesn't seem to exist)
    run(f, x...) = f(x...)
end

# ----------------------------------------------------------------------
# Global variables configured by command-line options.
floatsuffix = "" # adjusted by --suffix
xvarname = "x" # adjusted by --variable
epsbits = 256 # adjusted by --bits
debug_facilities = Set() # adjusted by --debug
full_output = false # adjusted by --full
array_format = false # adjusted by --array
preliminary_commands = array1d(String, 0) # adjusted by --pre

# ----------------------------------------------------------------------
# Diagnostic and utility functions.

# Enable debugging printouts from a particular subpart of this
# program.
#
# Arguments:
#    facility   Name of the facility to debug. For a list of facility names,
#               look through the code for calls to debug().
function enable_debug(facility)
    push!(debug_facilities, facility)
end

# Print a diagnostic.
#
# Arguments:
#    facility   Name of the facility for which this is a debug message.
#    printargs  Arguments to println() if debugging of that facility is
#               enabled.
macro debug(facility, printargs...)
    printit = quote
        print("[", $facility, "] ")
    end
    for arg in printargs
        printit = quote
            $printit
            print($(esc(arg)))
        end
    end
    return quote
        if $facility in debug_facilities
            $printit
            println()
        end
    end
end
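
# For example (a hypothetical session): once enable_debug("gausselim")
# has been called, which is what --debug=gausselim does, a statement
# like
#
#     @debug("gausselim", "n=", n)
#
# prints a line of the form "[gausselim] n=5", and prints nothing at
# all if that facility is not enabled.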

# Evaluate a polynomial.

# Arguments:
#    coeffs   Array of BigFloats giving the coefficients of the polynomial.
#             Starts with the constant term, i.e. coeffs[i] is the
#             coefficient of x^(i-1) (because Julia arrays are 1-based).
#    x        Point at which to evaluate the polynomial.
#
# Return value is a BigFloat.
function poly_eval(coeffs::Array{BigFloat}, x::BigFloat)
    n = length(coeffs)
    if n == 0
        return BigFloat(0)
    elseif n == 1
        return coeffs[1]
    else
        return coeffs[1] + x * poly_eval(coeffs[2:n], x)
    end
end
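
# For example (a hypothetical REPL session with this file loaded):
#
#     julia> poly_eval([BigFloat(1), BigFloat(2), BigFloat(3)], BigFloat(2))
#     17.0
#
# i.e. it computes 1 + 2x + 3x^2 at x=2 by Horner's rule:
# 1 + 2*(2 + 2*3) = 17.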

# Evaluate a rational function.

# Arguments:
#    ncoeffs  Array of BigFloats giving the coefficients of the numerator.
#             Starts with the constant term, and 1-based, as above.
#    dcoeffs  Array of BigFloats giving the coefficients of the denominator.
#             Starts with the constant term, and 1-based, as above.
#    x        Point at which to evaluate the function.
#
# Return value is a BigFloat.
function ratfn_eval(ncoeffs::Array{BigFloat}, dcoeffs::Array{BigFloat},
                    x::BigFloat)
    return poly_eval(ncoeffs, x) / poly_eval(dcoeffs, x)
end

# Format a BigFloat into an appropriate output format.
# Arguments:
#    x        BigFloat to format.
#
# Return value is a string.
function float_to_str(x)
    return string(x) * floatsuffix
end

# Format a polynomial into an arithmetic expression, for pasting into
# other tools such as gnuplot.

# Arguments:
#    coeffs   Array of BigFloats giving the coefficients of the polynomial.
#             Starts with the constant term, and 1-based, as above.
#
# Return value is a string.
function poly_to_string(coeffs::Array{BigFloat})
    n = length(coeffs)
    if n == 0
        return "0"
    elseif n == 1
        return float_to_str(coeffs[1])
    else
        return string(float_to_str(coeffs[1]), "+", xvarname, "*(",
                      poly_to_string(coeffs[2:n]), ")")
    end
end
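
# As a sketch of what to expect with the default settings,
# poly_to_string([BigFloat(1), BigFloat(2)]) produces a string of the
# form "1.0+x*(2.0)"; the exact formatting of each coefficient is
# whatever string() gives for a BigFloat in your Julia version, and
# --variable and --suffix adjust the variable name and literal suffix.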

# Format a rational function into a string.

# Arguments:
#    ncoeffs  Array of BigFloats giving the coefficients of the numerator.
#             Starts with the constant term, and 1-based, as above.
#    dcoeffs  Array of BigFloats giving the coefficients of the denominator.
#             Starts with the constant term, and 1-based, as above.
#
# Return value is a string.
function ratfn_to_string(ncoeffs::Array{BigFloat}, dcoeffs::Array{BigFloat})
    if length(dcoeffs) == 1 && dcoeffs[1] == 1
        # Special case: if the denominator is just 1, leave it out.
        return poly_to_string(ncoeffs)
    else
        return string("(", poly_to_string(ncoeffs), ")/(",
                      poly_to_string(dcoeffs), ")")
    end
end

# Format a list of x,y pairs into a string.

# Arguments:
#    xys      Array of (x,y) pairs of BigFloats.
#
# Return value is a string.
function format_xylist(xys::Array{Tuple{BigFloat,BigFloat}})
    return ("[\n" *
            join(["  "*string(x)*" -> "*string(y) for (x,y) in xys], "\n") *
            "\n]")
end

# ----------------------------------------------------------------------
# Matrix-equation solver for matrices of BigFloat.
#
# I had hoped that Julia's type-genericity would allow me to solve the
# matrix equation Mx=V by just writing 'M \ V'. Unfortunately, that
# works by translating the inputs into double precision and handing
# off to an optimised library, which misses the point when I have a
# matrix and vector of BigFloat and want my result in _better_ than
# double precision. So I have to implement my own specialisation of
# the \ operator for that case.
#
# Fortunately, the point of using BigFloats is that we have precision
# to burn, so I can do completely naïve Gaussian elimination without
# worrying about instability.

# Arguments:
#    matrix_in    2-dimensional array of BigFloats, representing a matrix M
#                 in row-first order, i.e. matrix_in[r,c] represents the
#                 entry in row r col c.
#    vector_in    1-dimensional array of BigFloats, representing a vector V.
#
# Return value: a 1-dimensional array X of BigFloats, satisfying M X = V.
#
# Expects the input to be an invertible square matrix and a vector of
# the corresponding size, on pain of failing an assertion.
function \(matrix_in :: Array{BigFloat,2},
           vector_in :: Array{BigFloat,1})
    # Copy the inputs, because we'll be mutating them as we go.
    M = copy(matrix_in)
    V = copy(vector_in)

    # Input consistency criteria: matrix is square, and vector has
    # length to match.
    n = length(V)
    @assert(n > 0)
    @assert(size(M) == (n,n))

    @debug("gausselim", "starting, n=", n)

    for i = 1:1:n
        # Straightforward Gaussian elimination: find the largest
        # non-zero entry in column i (and in a row we haven't sorted
        # out already), swap it into row i, scale that row to
        # normalise it to 1, then zero out the rest of the column by
        # subtracting a multiple of that row from each other row.

        @debug("gausselim", "matrix=", repr(M))
        @debug("gausselim", "vector=", repr(V))

        # Find the best pivot.
        bestrow = 0
        bestval = 0
        for j = i:1:n
            if abs(M[j,i]) > bestval
                bestrow = j
                bestval = abs(M[j,i])
            end
        end
        @assert(bestrow > 0) # make sure we did actually find one

        @debug("gausselim", "bestrow=", bestrow)

        # Swap it into row i.
        if bestrow != i
            for k = 1:1:n
                M[bestrow,k],M[i,k] = M[i,k],M[bestrow,k]
            end
            V[bestrow],V[i] = V[i],V[bestrow]
        end

        # Scale that row so that M[i,i] becomes 1.
        divisor = M[i,i]
        for k = 1:1:n
            M[i,k] = M[i,k] / divisor
        end
        V[i] = V[i] / divisor
        @assert(M[i,i] == 1)

        # Zero out all other entries in column i, by subtracting
        # multiples of this row.
        for j = 1:1:n
            if j != i
                factor = M[j,i]
                for k = 1:1:n
                    M[j,k] = M[j,k] - M[i,k] * factor
                end
                V[j] = V[j] - V[i] * factor
                @assert(M[j,i] == 0)
            end
        end
    end

    @debug("gausselim", "matrix=", repr(M))
    @debug("gausselim", "vector=", repr(V))
    @debug("gausselim", "done!")

    # Now we're done: M is the identity matrix, so the equation Mx=V
    # becomes just x=V, i.e. V is already exactly the vector we want
    # to return.
    return V
end
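
# A minimal sketch of this solver in use (a hypothetical REPL session;
# the way the result is displayed varies between Julia versions):
#
#     julia> BigFloat[2 0; 0 4] \ BigFloat[6, 8]
#     2-element Array{BigFloat,1}:
#      3.0
#      2.0
#
# Because both arguments are arrays of BigFloat, the call dispatches to
# the specialisation above rather than to the built-in \ operator.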

# ----------------------------------------------------------------------
# Least-squares fitting of a rational function to a set of (x,y)
# points.
#
# We use this to get an initial starting point for the Remez
# iteration. Therefore, it doesn't really need to be particularly
# accurate; it only needs to be good enough to wiggle back and forth
# across the target function the right number of times (so as to give
# enough error extrema to start optimising from) and not have any
# poles in the target interval.
#
# Least-squares fitting of a _polynomial_ is actually a sensible thing
# to do, and minimises the rms error. Doing the following trick with a
# rational function P/Q is less sensible, because it cannot be made to
# minimise the error function (P/Q-f)^2 that you actually wanted;
# instead it minimises (P-fQ)^2. But that should be good enough to
# have the properties described above.
#
# Some theory: suppose you're trying to choose a set of parameters a_i
# so as to minimise the sum of squares of some error function E_i.
# Basic calculus says, if you do this in one variable, just
# differentiate and solve for zero. In this case, that works fine even
# with multiple variables, because you _partially_ differentiate with
# respect to each a_i, giving a system of equations, and that system
# turns out to be linear so we just solve it as a matrix.
#
# In this case, our parameters are the coefficients of P and Q; to
# avoid underdetermining the system we'll fix Q's constant term at 1,
# so that our error function (as described above) is
#
# E = \sum (p_0 + p_1 x + ... + p_n x^n - y - y q_1 x - ... - y q_d x^d)^2
#
# where the sum is over all (x,y) coordinate pairs. Setting dE/dp_j=0
# (for each j) gives an equation of the form
#
# 0 = \sum 2(p_0 + p_1 x + ... + p_n x^n - y - y q_1 x - ... - y q_d x^d) x^j
#
# and setting dE/dq_j=0 gives one of the form
#
# 0 = \sum 2(p_0 + p_1 x + ... + p_n x^n - y - y q_1 x - ... - y q_d x^d) y x^j
#
# And both of those row types, treated as multivariate linear
# equations in the p,q values, have each coefficient being a value of
# the form \sum x^i, \sum y x^i or \sum y^2 x^i, for various i. (Times
# a factor of 2, but we can throw that away.) So we can go through the
# list of input coordinates summing all of those things, and then we
# have enough information to construct our matrix and solve it
# straight off for the rational function coefficients.
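#
# As a concrete illustration of the layout constructed below: with n=1
# and d=1, the unknowns are (p_0, p_1, q_1) and the system is
#
#   ( sum 1      sum x       -sum x y     ) (p_0)   ( sum y     )
#   ( sum x      sum x^2     -sum x^2 y   ) (p_1) = ( sum x y   )
#   ( sum x y    sum x^2 y   -sum x^2 y^2 ) (q_1)   ( sum x y^2 )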

# Arguments:
#    f        The function to be approximated. Maps BigFloat -> BigFloat.
#    xvals    Array of BigFloats, giving the list of x-coordinates at which
#             to evaluate f.
#    n        Degree of the numerator polynomial of the desired rational
#             function.
#    d        Degree of the denominator polynomial of the desired rational
#             function.
#    w        Error-weighting function. Takes two BigFloat arguments x,y
#             and returns a scaling factor for the error at that location.
#             A larger value indicates that the error should be given
#             greater weight in the square sum we try to minimise.
#             If unspecified, defaults to giving everything the same weight.
#
# Return values: a pair of arrays of BigFloats (N,D) giving the
# coefficients of the returned rational function. N has size n+1; D
# has size d+1. Both start with the constant term, i.e. N[i] is the
# coefficient of x^(i-1) (because Julia arrays are 1-based). D[1] will
# be 1.
function ratfn_leastsquares(f::Function, xvals::Array{BigFloat}, n, d,
                            w = (x,y)->BigFloat(1))
    # Accumulate sums of x^i y^j, for j={0,1,2} and a range of x.
    # Again because Julia arrays are 1-based, we'll have sums[i,j]
    # being the sum of x^(i-1) y^(j-1).
    maxpow = max(n,d) * 2 + 1
    sums = zeros(BigFloat, maxpow, 3)
    for x = xvals
        y = f(x)
        weight = w(x,y)
        for i = 1:1:maxpow
            for j = 1:1:3
                sums[i,j] += x^(i-1) * y^(j-1) * weight
            end
        end
    end

    @debug("leastsquares", "sums=", repr(sums))

    # Build the matrix. We're solving n+d+1 equations in n+d+1
    # unknowns. (We actually have to return n+d+2 coefficients, but
    # one of them is hardwired to 1.)
    matrix = array2d(BigFloat, n+d+1, n+d+1)
    vector = array1d(BigFloat, n+d+1)
    for i = 0:1:n
        # Equation obtained by differentiating with respect to p_i,
        # i.e. the numerator coefficient of x^i.
        row = 1+i
        for j = 0:1:n
            matrix[row, 1+j] = sums[1+i+j, 1]
        end
        for j = 1:1:d
            matrix[row, 1+n+j] = -sums[1+i+j, 2]
        end
        vector[row] = sums[1+i, 2]
    end
    for i = 1:1:d
        # Equation obtained by differentiating with respect to q_i,
        # i.e. the denominator coefficient of x^i.
        row = 1+n+i
        for j = 0:1:n
            matrix[row, 1+j] = sums[1+i+j, 2]
        end
        for j = 1:1:d
            matrix[row, 1+n+j] = -sums[1+i+j, 3]
        end
        vector[row] = sums[1+i, 3]
    end

    @debug("leastsquares", "matrix=", repr(matrix))
    @debug("leastsquares", "vector=", repr(vector))

    # Solve the matrix equation.
    all_coeffs = matrix \ vector

    @debug("leastsquares", "all_coeffs=", repr(all_coeffs))

    # And marshal the results into two separate polynomial vectors to
    # return.
    ncoeffs = all_coeffs[1:n+1]
    dcoeffs = vcat([1], all_coeffs[n+2:n+d+1])
    return (ncoeffs, dcoeffs)
end
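
# For example, a hypothetical way to start a degree-(2,2) approximation
# to exp on [0,1]:
#
#     xs = [BigFloat(i)/100 for i in 0:100]
#     nc, dc = ratfn_leastsquares(x->exp(x), xs, 2, 2)
#
# after which ratfn_eval(nc, dc, x) approximates exp(x) roughly over
# that interval; the test() function below exercises exactly this.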

# ----------------------------------------------------------------------
# Golden-section search to find a maximum of a function.

# Arguments:
#    f        Function to be maximised/minimised. Maps BigFloat -> BigFloat.
#    a,b,c    BigFloats bracketing a maximum of the function.
#
# Expects:
#    a,b,c are in order (either a<=b<=c or c<=b<=a)
#    a != c             (but b can equal one or the other if it wants to)
#    f(a) <= f(b) >= f(c)
#
# Return value is an (x,y) pair of BigFloats giving the extremal input
# and output. (That is, y=f(x).)
function goldensection(f::Function, a::BigFloat, b::BigFloat, c::BigFloat)
    # Decide on a 'good enough' threshold.
    threshold = abs(c-a) * 2^(-epsbits/2)

    # We'll need the golden ratio phi, of course. Or rather, in this
    # case, we need 1/phi = 0.618...
    one_over_phi = 2 / (1 + sqrt(BigFloat(5)))

    # Flip round the interval endpoints so that the interval [a,b] is
    # at least as large as [b,c]. (Then we can always pick our new
    # point in [a,b] without having to handle lots of special cases.)
    if abs(b-a) < abs(c-b)
        a,  c  = c,  a
    end

    # Evaluate the function at the initial points.
    fa = f(a)
    fb = f(b)
    fc = f(c)

    @debug("goldensection", "starting")

    while abs(c-a) > threshold
        @debug("goldensection", "a: ", a, " -> ", fa)
        @debug("goldensection", "b: ", b, " -> ", fb)
        @debug("goldensection", "c: ", c, " -> ", fc)

        # Check invariants.
        @assert(a <= b <= c || c <= b <= a)
        @assert(fa <= fb >= fc)

        # Subdivide the larger of the intervals [a,b] and [b,c]. We've
        # arranged that this is always [a,b], for simplicity.
        d = a + (b-a) * one_over_phi

        # Now we have an interval looking like this (possibly
        # reversed):
        #
        #    a            d       b            c
        #
        # and we know f(b) is bigger than either f(a) or f(c). We have
        # two cases: either f(d) > f(b), or vice versa. In either
        # case, we can narrow to an interval of 1/phi the size, and
        # still satisfy all our invariants (three ordered points,
        # [a,b] at least the width of [b,c], f(a)<=f(b)>=f(c)).
        fd = f(d)
        @debug("goldensection", "d: ", d, " -> ", fd)
        if fd > fb
            a,  b,  c  = a,  d,  b
            fa, fb, fc = fa, fd, fb
            @debug("goldensection", "adb case")
        else
            a,  b,  c  = c,  b,  d
            fa, fb, fc = fc, fb, fd
            @debug("goldensection", "cbd case")
        end
    end

    @debug("goldensection", "done: ", b, " -> ", fb)
    return (b, fb)
end
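
# For example (a hypothetical REPL session):
#
#     julia> goldensection(x->sin(x), BigFloat(0), BigFloat(1), BigFloat(3))
#     (1.570796..., 1.0)
#
# i.e. it converges on the maximum of sin at pi/2. The preconditions
# hold for that call because sin(0) <= sin(1) >= sin(3).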

# ----------------------------------------------------------------------
# Find the extrema of a function within a given interval.

# Arguments:
#    f         The function to be approximated. Maps BigFloat -> BigFloat.
#    grid      A set of points at which to evaluate f. Must be high enough
#              resolution to make extrema obvious.
#
# Returns an array of (x,y) pairs of BigFloats, with each x,y giving
# the extremum location and its value (i.e. y=f(x)).
function find_extrema(f::Function, grid::Array{BigFloat})
    len = length(grid)
    extrema = array1d(Tuple{BigFloat, BigFloat}, 0)
    for i = 1:1:len
        # We have to provide goldensection() with three points
        # bracketing the extremum. If the extremum is at one end of
        # the interval, then the only way we can do that is to set two
        # of the points equal (which goldensection() will cope with).
        prev = max(1, i-1)
        next = min(i+1, len)

        # Find our three pairs of (x,y) coordinates.
        xp, xi, xn = grid[prev], grid[i], grid[next]
        yp, yi, yn = f(xp), f(xi), f(xn)

        # See if they look like an extremum, and if so, ask
        # goldensection() to give a more exact location for it.
        if yp <= yi >= yn
            push!(extrema, goldensection(f, xp, xi, xn))
        elseif yp >= yi <= yn
            x, y = goldensection(x->-f(x), xp, xi, xn)
            push!(extrema, (x, -y))
        end
    end
    return extrema
end
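
# Note that the grid endpoints always act as candidate extrema: for a
# grid spanning [0, 3] and f = sin, say, this would be expected to
# report the interior maximum near pi/2 and also extrema at the two
# ends of the interval, where the bracketing points coincide.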

# ----------------------------------------------------------------------
# Winnow a list of a function's extrema to give a subsequence of a
# specified length, with the extrema in the subsequence alternating
# signs, and with the smallest absolute value of an extremum in the
# subsequence as large as possible.
#
# We do this using a dynamic-programming approach. We work along the
# provided array of extrema, and at all times, we track the best set
# of extrema we have so far seen for each possible (length, sign of
# last extremum) pair. Each new extremum is evaluated to see whether
# it can be added to any previously seen best subsequence to make a
# new subsequence that beats the previous record holder in its slot.

# Arguments:
#    extrema   An array of (x,y) pairs of BigFloats giving the input extrema.
#    n         Number of extrema required as output.
#
# Returns a new array of (x,y) pairs which is a subsequence of the
# original sequence. (So, in particular, if the input was sorted by x
# then so will the output be.)
function winnow_extrema(extrema::Array{Tuple{BigFloat,BigFloat}}, n)
    # best[i,j] gives the best sequence so far of length i and with
    # sign j (where signs are coded as 1=positive, 2=negative), in the
    # form of a tuple (cost, actual array of x,y pairs).
    best = fill((BigFloat(0), array1d(Tuple{BigFloat,BigFloat}, 0)), n, 2)

    for (x,y) = extrema
        if y > 0
            sign = 1
        elseif y < 0
            sign = 2
        else
            # A zero-valued extremum cannot possibly contribute to any
            # optimal sequence, so we simply ignore it!
            continue
        end

        for i = 1:1:n
            # See if we can create a new entry for best[i,sign] by
            # appending our current (x,y) to some previous thing.
            if i == 1
                # Special case: we don't store a best zero-length
                # sequence :-)
                candidate = (abs(y), [(x,y)])
            else
                othersign = 3-sign # map 1->2 and 2->1
                oldscore, oldlist = best[i-1, othersign]
                newscore = min(abs(y), oldscore)
                newlist = vcat(oldlist, [(x,y)])
                candidate = (newscore, newlist)
            end
            # If our new candidate improves on the previous value of
            # best[i,sign], then replace it.
            if candidate[1] > best[i,sign][1]
                best[i,sign] = candidate
            end
        end
    end

    # Our ultimate return value has to be either best[n,1] or
    # best[n,2], but it could be either. See which one has the higher
    # score.
    if best[n,1][1] > best[n,2][1]
        ret = best[n,1][2]
    else
        ret = best[n,2][2]
    end
    # Make sure we did actually _find_ a good answer.
    @assert(length(ret) == n)
    return ret
end
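
# A small worked example of the winnowing (writing plain numbers for
# brevity): given the input extrema
#
#     [(0, 0.5), (1, 0.3), (2, 0.6), (3, -0.7)]
#
# and n = 2, the best alternating-sign subsequence of length 2 is
# [(2, 0.6), (3, -0.7)], whose smallest absolute value 0.6 beats that
# of any other alternating choice, e.g. [(0, 0.5), (3, -0.7)] scores
# only 0.5.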

# ----------------------------------------------------------------------
# Construct a rational-function approximation with equal and
# alternating weighted deviation at a specific set of x-coordinates.

# Arguments:
#    f         The function to be approximated. Maps BigFloat -> BigFloat.
#    coords    An array of BigFloats giving the x-coordinates. There should
#              be n+d+2 of them.
#    n, d      The degrees of the numerator and denominator of the desired
#              approximation.
#    prev_err  A plausible value for the alternating weighted deviation.
#              (Required to kickstart a binary search in the nonlinear case;
#              see comments below.)
#    w         Error-weighting function. Takes two BigFloat arguments x,y
#              and returns a scaling factor for the error at that location.
#              The returned approximation R should have the minimum possible
#              maximum value of abs((f(x)-R(x)) * w(x,f(x))). Optional
#              parameter, defaulting to the always-return-1 function.
#
# Return values: a pair of arrays of BigFloats (N,D) giving the
# coefficients of the returned rational function. N has size n+1; D
# has size d+1. Both start with the constant term, i.e. N[i] is the
# coefficient of x^(i-1) (because Julia arrays are 1-based). D[1] will
# be 1.
function ratfn_equal_deviation(f::Function, coords::Array{BigFloat},
                               n, d, prev_err::BigFloat,
                               w = (x,y)->BigFloat(1))
    @debug("equaldev", "n=", n, " d=", d, " coords=", repr(coords))
    @assert(length(coords) == n+d+2)

    if d == 0
        # Special case: we're after a polynomial. In this case, we
        # have the particularly easy job of just constructing and
        # solving a system of n+2 linear equations, to find the n+1
        # coefficients of the polynomial and also the amount of
        # deviation at the specified coordinates. Each equation is of
        # the form
        #
        #   p_0 x^0 + p_1 x^1 + ... + p_n x^n ± e/w(x) = f(x)
        #
        # in which the p_i and e are the variables, and the powers of
        # x and calls to w and f are the coefficients.

        matrix = array2d(BigFloat, n+2, n+2)
        vector = array1d(BigFloat, n+2)
        currsign = +1
        for i = 1:1:n+2
            x = coords[i]
            for j = 0:1:n
                matrix[i,1+j] = x^j
            end
            y = f(x)
            vector[i] = y
            matrix[i, n+2] = currsign / w(x,y)
            currsign = -currsign
        end

        @debug("equaldev", "matrix=", repr(matrix))
        @debug("equaldev", "vector=", repr(vector))

        outvector = matrix \ vector

        @debug("equaldev", "outvector=", repr(outvector))

        ncoeffs = outvector[1:n+1]
        dcoeffs = [BigFloat(1)]
        return ncoeffs, dcoeffs
    else
        # For a nontrivial rational function, the system of equations
        # we need to solve becomes nonlinear, because each equation
        # now takes the form
        #
        #   p_0 x^0 + p_1 x^1 + ... + p_n x^n
        #   --------------------------------- ± e/w(x) = f(x)
        #     x^0 + q_1 x^1 + ... + q_d x^d
        #
        # and multiplying up by the denominator gives you a lot of
        # terms containing e × q_i. So we can't do this the really
        # easy way using a matrix equation as above.
        #
        # Fortunately, this is a fairly easy kind of nonlinear system.
        # The equations all become linear if you switch to treating e
        # as a constant, so a reasonably sensible approach is to pick
        # a candidate value of e, solve all but one of the equations
        # for the remaining unknowns, and then see what the error
        # turns out to be in the final equation. The Chebyshev
        # alternation theorem guarantees that that error in the last
        # equation will be anti-monotonic in the input e, so we can
        # just binary-search until we get the two as close to equal as
        # we need them.

        function try_e(e)
            # Try a given value of e, derive the coefficients of the
            # resulting rational function by setting up equations
            # based on the first n+d+1 of the n+d+2 coordinates, and
            # see what the error turns out to be at the final
            # coordinate.
            matrix = array2d(BigFloat, n+d+1, n+d+1)
            vector = array1d(BigFloat, n+d+1)
            currsign = +1
            for i = 1:1:n+d+1
                x = coords[i]
                y = f(x)
                y_adj = y - currsign * e / w(x,y)
                for j = 0:1:n
                    matrix[i,1+j] = x^j
                end
                for j = 1:1:d
                    matrix[i,1+n+j] = -x^j * y_adj
                end
                vector[i] = y_adj
                currsign = -currsign
            end

            @debug("equaldev", "trying e=", e)
            @debug("equaldev", "matrix=", repr(matrix))
            @debug("equaldev", "vector=", repr(vector))

            outvector = matrix \ vector

            @debug("equaldev", "outvector=", repr(outvector))

            ncoeffs = outvector[1:n+1]
            dcoeffs = vcat([BigFloat(1)], outvector[n+2:n+d+1])

            x = coords[n+d+2]
            y = f(x)
            last_e = (ratfn_eval(ncoeffs, dcoeffs, x) - y) * w(x,y) * -currsign

            @debug("equaldev", "last e=", last_e)

            return ncoeffs, dcoeffs, last_e
        end

        threshold = 2^(-epsbits/2) # convergence threshold

        # Start by trying our previous iteration's error value. This
        # value (e0) will be one end of our binary-search interval,
        # and whatever it caused the last point's error to be, that
        # (e1) will be the other end.
        e0 = prev_err
        @debug("equaldev", "e0 = ", e0)
        nc, dc, e1 = try_e(e0)
        @debug("equaldev", "e1 = ", e1)
        if abs(e1-e0) <= threshold
            # If we're _really_ lucky, we hit the error right on the
            # nose just by doing that!
            return nc, dc
        end
        s = sign(e1-e0)
        @debug("equaldev", "s = ", s)

        # Verify by assertion that trying our other interval endpoint
        # e1 gives a value that's wrong in the other direction.
        # (Otherwise our binary search won't get a sensible answer at
        # all.)
        nc, dc, e2 = try_e(e1)
        @debug("equaldev", "e2 = ", e2)
        @assert(sign(e2-e1) == -s)

        # Now binary-search until our two endpoints narrow enough.
        local emid
        while abs(e1-e0) > threshold
            emid = (e1+e0)/2
            nc, dc, enew = try_e(emid)
            if sign(enew-emid) == s
                e0 = emid
            else
                e1 = emid
            end
        end

        @debug("equaldev", "final e=", emid)
        return nc, dc
    end
end

# ----------------------------------------------------------------------
# Top-level function to find a minimax rational-function approximation.

# Arguments:
#    f         The function to be approximated. Maps BigFloat -> BigFloat.
#    interval  A pair of BigFloats giving the endpoints of the interval
#              (in either order) on which to approximate f.
#    n, d      The degrees of the numerator and denominator of the desired
#              approximation.
#    w         Error-weighting function. Takes two BigFloat arguments x,y
#              and returns a scaling factor for the error at that location.
#              The returned approximation R should have the minimum possible
#              maximum value of abs((f(x)-R(x)) * w(x,f(x))). Optional
#              parameter, defaulting to the always-return-1 function.
#
# Return values: a tuple (N,D,E,X), where

#    N,D       A pair of arrays of BigFloats giving the coefficients
#              of the returned rational function. N has size n+1; D
#              has size d+1. Both start with the constant term, i.e.
#              N[i] is the coefficient of x^(i-1) (because Julia
#              arrays are 1-based). D[1] will be 1.
#    E         The maximum weighted error (BigFloat).
#    X         An array of pairs of BigFloats giving the locations of n+d+2
#              points and the weighted error at each of those points. The
#              weighted error values will have alternating signs, which
#              means that the Chebyshev alternation theorem guarantees
#              that any other function of the same degree must exceed
#              the error of this one at at least one of those points.
function ratfn_minimax(f::Function, interval::Tuple{BigFloat,BigFloat}, n, d,
                       w = (x,y)->BigFloat(1))
    # We start off by finding a least-squares approximation. This
    # doesn't need to be perfect, but if we can get it reasonably good
    # then it'll save iterations in the refining stage.
    #
    # Least-squares approximations tend to look nicer in a minimax
    # sense if you evaluate the function at a big pile of Chebyshev
    # nodes rather than uniformly spaced points. These values will
    # also make a good grid to use for the initial search for error
    # extrema, so we'll keep them around for that reason too.

    # Construct the grid.
    lo, hi = minimum(interval), maximum(interval)
    local grid
    let
        mid = (hi+lo)/2
        halfwid = (hi-lo)/2
        nnodes = 16 * (n+d+1)
        pi = 2*asin(BigFloat(1))
        grid = [ mid - halfwid * cos(pi*i/nnodes) for i=0:1:nnodes ]
    end

    # Find the initial least-squares approximation.
    (nc, dc) = ratfn_leastsquares(f, grid, n, d, w)
    @debug("minimax", "initial leastsquares approx = ",
           ratfn_to_string(nc, dc))

    # Threshold of convergence. We stop when the relative difference
    # between the min and max (winnowed) error extrema is less than
    # this.
    #
    # This is set to the cube root of machine epsilon on a more or
    # less empirical basis, because the rational-function case will
    # not converge reliably if you set it to only the square root.
    # (Repeatable by using the --test mode.) On the assumption that
    # input and output error in each iteration can be expected to be
    # related by a simple power law (because it'll just be down to how
    # many leading terms of a Taylor series are zero), the cube root
    # was the next thing to try.
    threshold = 2^(-epsbits/3)

    # Main loop.
    while true
        # Find all the error extrema we can.
        function compute_error(x)
            real_y = f(x)
            approx_y = ratfn_eval(nc, dc, x)
            return (approx_y - real_y) * w(x, real_y)
        end
        extrema = find_extrema(compute_error, grid)
        @debug("minimax", "all extrema = ", format_xylist(extrema))

        # Winnow the extrema down to the right number, and ensure they
        # have alternating sign.
        extrema = winnow_extrema(extrema, n+d+2)
        @debug("minimax", "winnowed extrema = ", format_xylist(extrema))

        # See if we've finished.
        min_err = minimum([abs(y) for (x,y) = extrema])
        max_err = maximum([abs(y) for (x,y) = extrema])
        variation = (max_err - min_err) / max_err
        @debug("minimax", "extremum variation = ", variation)
        if variation < threshold
            @debug("minimax", "done!")
            return nc, dc, max_err, extrema
        end

        # If not, refine our function by equalising the error at the
        # extrema points, and go round again.
        (nc, dc) = ratfn_equal_deviation(f, map(x->x[1], extrema),
                                         n, d, max_err, w)
        @debug("minimax", "refined approx = ", ratfn_to_string(nc, dc))
    end
end
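
# For example, a hypothetical call (the same one test() makes below):
#
#     nc, dc, e, extrema = ratfn_minimax(x->exp(x),
#                                        (BigFloat(0), BigFloat(1)), 4, 0)
#
# returns the minimax quartic for exp on [0,1], whose maximum error e
# comes out on the order of a few times 10^-5, together with the
# (n+d+2 = 6) error extrema it equalised.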

# ----------------------------------------------------------------------
# Check if a polynomial is well-conditioned for accurate evaluation in
# a given interval by Horner's rule.
#
# This is true if at every step where Horner's rule computes
# (coefficient + x*value_so_far), the constant coefficient you're
# adding on is of larger magnitude than the x*value_so_far operand.
# And this has to be true for every x in the interval.
#
# Arguments:
#    coeffs    The coefficients of the polynomial under test. Starts with
#              the constant term, i.e. coeffs[i] is the coefficient of
#              x^(i-1) (because Julia arrays are 1-based).
#    lo, hi    The bounds of the interval.
#
# Return value: the largest ratio (x*value_so_far / coefficient), at
# any step of evaluation, for any x in the interval. If this is less
# than 1, the polynomial is at least somewhat well-conditioned;
# ideally you want it to be more like 1/8 or 1/16 or so, so that the
# relative rounding errors accumulated at each step are reduced by
# several factors of 2 when the next coefficient is added on.

function wellcond(coeffs, lo, hi)
    x = max(abs(lo), abs(hi))
    worst = 0
    so_far = 0
    for i = length(coeffs):-1:1
        coeff = abs(coeffs[i])
        so_far *= x
        if coeff != 0
            thisval = so_far / coeff
            worst = max(worst, thisval)
            so_far += coeff
        end
    end
    return worst
end
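
# For example, wellcond([1, 0.5, 0.25], -1, 1) returns 0.75: the worst
# step is the final addition of the constant term 1, by which point the
# accumulated |0.5*x + 0.25*x^2| can be as large as 0.75 at |x| = 1.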

# ----------------------------------------------------------------------
# Small set of unit tests.

function test()
    passes = 0
    fails = 0

    function approx_eq(x, y, limit=1e-6)
        return abs(x - y) < limit
    end

    function test(condition)
        if condition
            passes += 1
        else
            println("fail")
            fails += 1
        end
    end

    # Test Gaussian elimination.
    println("Gaussian test 1:")
    m = BigFloat[1 1 2; 3 5 8; 13 34 21]
    v = BigFloat[1, -1, 2]
    ret = m \ v
    println("  ",repr(ret))
    test(approx_eq(ret[1], 109/26))
    test(approx_eq(ret[2], -105/130))
    test(approx_eq(ret[3], -31/26))

    # Test leastsquares rational functions.
    println("Leastsquares test 1:")
    n = 10000
    a = array1d(BigFloat, n+1)
    for i = 0:1:n
        a[1+i] = i/BigFloat(n)
    end
    (nc, dc) = ratfn_leastsquares(x->exp(x), a, 2, 2)
    println("  ",ratfn_to_string(nc, dc))
    for x = a
        test(approx_eq(exp(x), ratfn_eval(nc, dc, x), 1e-4))
    end

    # Test golden section search.
    println("Golden section test 1:")
    x, y = goldensection(x->sin(x),
                              BigFloat(0), BigFloat(1)/10, BigFloat(4))
    println("  ", x, " -> ", y)
    test(approx_eq(x, asin(BigFloat(1))))
    test(approx_eq(y, 1))

    # Test extrema-winnowing algorithm.
    println("Winnow test 1:")
    extrema = [(x, sin(20*x)*sin(197*x))
               for x in BigFloat(0):BigFloat(1)/1000:BigFloat(1)]
    winnowed = winnow_extrema(extrema, 12)
    println("  ret = ", format_xylist(winnowed))
    prevx, prevy = -1, 0
    for (x,y) = winnowed
        test(x > prevx)
        test(y != 0)
        test(prevy * y <= 0) # tolerates initial prevy having no sign
        test(abs(y) > 0.9)
        prevx, prevy = x, y
    end

    # Test actual minimax approximation.
    println("Minimax test 1 (polynomial):")
    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 4, 0)
    println("  ",e)
    println("  ",ratfn_to_string(nc, dc))
    test(0 < e < 1e-3)
    for x = 0:BigFloat(1)/1000:1
        test(abs(ratfn_eval(nc, dc, x) - exp(x)) <= e * 1.0000001)
    end

    println("Minimax test 2 (rational):")
    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 2, 2)
    println("  ",e)
    println("  ",ratfn_to_string(nc, dc))
    test(0 < e < 1e-3)
    for x = 0:BigFloat(1)/1000:1
        test(abs(ratfn_eval(nc, dc, x) - exp(x)) <= e * 1.0000001)
    end

    println("Minimax test 3 (polynomial, weighted):")
    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 4, 0,
                                   (x,y)->1/y)
    println("  ",e)
    println("  ",ratfn_to_string(nc, dc))
    test(0 < e < 1e-3)
    for x = 0:BigFloat(1)/1000:1
        test(abs(ratfn_eval(nc, dc, x) - exp(x))/exp(x) <= e * 1.0000001)
    end

    println("Minimax test 4 (rational, weighted):")
    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 2, 2,
                                   (x,y)->1/y)
    println("  ",e)
    println("  ",ratfn_to_string(nc, dc))
    test(0 < e < 1e-3)
    for x = 0:BigFloat(1)/1000:1
        test(abs(ratfn_eval(nc, dc, x) - exp(x))/exp(x) <= e * 1.0000001)
    end

    println("Minimax test 5 (rational, weighted, odd degree):")
    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 2, 1,
                                   (x,y)->1/y)
    println("  ",e)
    println("  ",ratfn_to_string(nc, dc))
    test(0 < e < 1e-3)
    for x = 0:BigFloat(1)/1000:1
        test(abs(ratfn_eval(nc, dc, x) - exp(x))/exp(x) <= e * 1.0000001)
    end

    total = passes + fails
    println(passes, " passes ", fails, " fails ", total, " total")
end

# ----------------------------------------------------------------------
# Online help.
function help()
    print("""
Usage:

    remez.jl [options] <lo> <hi> <n> <d> <expr> [<weight>]

Arguments:

    <lo>, <hi>

        Bounds of the interval on which to approximate the target
        function. These are parsed and evaluated as Julia expressions,
        so you can write things like '1/BigFloat(6)' to get an
        accurate representation of 1/6, or '4*atan(BigFloat(1))' to
        get pi. (In older versions of Julia, the obvious
        'BigFloat(pi)' doesn't work, but the atan form does.)

    <n>, <d>

        The desired degree of polynomial(s) you want for your
        approximation. These should be non-negative integers. If you
        want a rational function as output, set <n> to the degree of
        the numerator, and <d> the denominator. If you just want an
        ordinary polynomial, set <d> to 0, and <n> to the degree of
        the polynomial you want.

    <expr>

        A Julia expression giving the function to be approximated on
        the interval. The input value is predefined as 'x' when this
        expression is evaluated, so you should write something along
        the lines of 'sin(x)' or 'sqrt(1+tan(x)^2)' etc.

    <weight>

        If provided, a Julia expression giving the weighting factor
        for the approximation error. The output polynomial will
        minimise the largest absolute value of (P-f) * w at any point
        in the interval, where P is the value of the polynomial, f is
        the value of the target function given by <expr>, and w is the
        weight given by this function.

        When this expression is evaluated, the input value to P and f
        is predefined as 'x', and also the true output value f(x) is
        predefined as 'y'. So you can minimise the relative error by
        simply writing '1/y'.

        If the <weight> argument is not provided, the default
        weighting function always returns 1, so that the polynomial
        will minimise the maximum absolute error |P-f|.

Computation options:

    --pre=<predef_expr>

        Evaluate the Julia expression <predef_expr> before starting
        the computation. This permits you to pre-define variables or
        functions which the Julia expressions in your main arguments
        can refer to. All of <lo>, <hi>, <expr> and <weight> can make
        use of things defined by <predef_expr>.

        One internal remez.jl function that you might sometimes find
        useful in this expression is 'goldensection', which finds the
        location and value of a maximum of a function. For example,
        one implementation strategy for the gamma function involves
        translating it to put its unique local minimum at the origin,
        in which case you can write something like this

            --pre='(m,my) = goldensection(x -> -gamma(x),
                  BigFloat(1), BigFloat(1.5), BigFloat(2))'

        to predefine 'm' as the location of gamma's minimum, and 'my'
        as the (negated) value that gamma actually takes at that
        point, i.e. -gamma(m).

        (Since 'goldensection' always finds a maximum, we had to
        negate gamma in the input function to make it find a minimum
        instead. Consult the comments in the source for more details
        on the use of this function.)

        If you use this option more than once, all the expressions you
        provide will be run in sequence.

    --bits=<bits>

        Specify the accuracy to which you want the output polynomial,
        in bits. Default 256, which should be more than enough.

    --bigfloatbits=<bits>

        Turn up the precision used by Julia for its BigFloat
        evaluation. Default is Julia's default (also 256). You might
        want to try setting this higher than the --bits value if the
        algorithm is failing to converge for some reason.

Output options:

    --full

        Instead of just printing the approximation function itself,
        also print auxiliary information:
         - the locations of the error extrema, and the actual
           (weighted) error at each of those locations
         - the overall maximum error of the function
         - a 'well-conditioning quotient', giving the worst-case ratio
           between any polynomial coefficient and the largest possible
           value of the higher-order terms it will be added to.

        The well-conditioning quotient should be less than 1, ideally
        by several factors of two, for accurate evaluation in the
        target precision. If you request a rational function, a
        separate well-conditioning quotient will be printed for the
        numerator and denominator.

        Use this option when deciding how wide an interval to
        approximate your function on, and what degree of polynomial
        you need.

    --variable=<identifier>

        When writing the output polynomial or rational function in its
        usual form as an arithmetic expression, use <identifier> as
        the name of the input variable. Default is 'x'.

    --suffix=<suffix>

        When writing the output polynomial or rational function in its
        usual form as an arithmetic expression, write <suffix> after
        every floating-point literal. For example, '--suffix=F' will
        generate a C expression in which the coefficients are literals
        of type 'float' rather than 'double'.

    --array

        Instead of writing the output polynomial as an arithmetic
        expression in Horner's rule form, write out just its
        coefficients, one per line, each with a trailing comma.
        Suitable for pasting into a C array declaration.

        This option is not currently supported if the output is a
        rational function, because you'd need two separate arrays for
        the numerator and denominator coefficients and there's no
        obviously right way to provide both of those together.

Debug and test options:

    --debug=<facility>

        Enable debugging output from various parts of the Remez
        calculation. <facility> should be the name of one of the
        classes of diagnostic output implemented in the program.
        Useful values include 'gausselim', 'leastsquares',
        'goldensection', 'equaldev', 'minimax'. This is probably
        mostly useful to people debugging problems with the script, so
        consult the source code for more information about what the
        diagnostic output for each of those facilities will be.

        If you want diagnostics from more than one facility, specify
        this option multiple times with different arguments.

    --test

        Run remez.jl's internal test suite. No arguments needed.

Miscellaneous options:

    --help

        Display this text and exit. No arguments needed.

""")
end

# ----------------------------------------------------------------------
# Main program.

function main()
    nargs = length(argwords)
    if nargs != 5 && nargs != 6
        error("usage: remez.jl <lo> <hi> <n> <d> <expr> [<weight>]\n" *
              "       run 'remez.jl --help' for more help")
    end

    for preliminary_command in preliminary_commands
        eval(Meta.parse(preliminary_command))
    end

    lo = BigFloat(eval(Meta.parse(argwords[1])))
    hi = BigFloat(eval(Meta.parse(argwords[2])))
    n = parse(Int,argwords[3])
    d = parse(Int,argwords[4])
    f = eval(Meta.parse("x -> " * argwords[5]))

    # Wrap the user-provided function with a function of our own. This
    # arranges to detect silly FP values (inf,nan) early and diagnose
    # them sensibly, and also lets us log all evaluations of the
    # function in case you suspect it's doing the wrong thing at some
    # special-case point.
    function func(x)
        y = run(f,x)
        @debug("f", x, " -> ", y)
        if !isfinite(y)
            error("f(" * string(x) * ") returned non-finite value " * string(y))
        end
        return y
    end

    if nargs == 6
        # Wrap the user-provided weight function similarly.
        w = eval(Meta.parse("(x,y) -> " * argwords[6]))
        function wrapped_weight(x,y)
            ww = run(w,x,y)
            if !isfinite(ww)
                error("w(" * string(x) * "," * string(y) *
                      ") returned non-finite value " * string(ww))
            end
            return ww
        end
        weight = wrapped_weight
    else
        weight = (x,y)->BigFloat(1)
    end

    (nc, dc, e, extrema) = ratfn_minimax(func, (lo, hi), n, d, weight)
    if array_format
        if d == 0
            functext = join([string(x)*",\n" for x=nc],"")
        else
            # It's unclear how you should best format an array of
            # coefficients for a rational function, so I'll leave
            # implementing this option until I have a use case.
            error("--array unsupported for rational functions")
        end
    else
        functext = ratfn_to_string(nc, dc) * "\n"
    end
    if full_output
        # Print everything you might want to know about the function
        println("extrema = ", format_xylist(extrema))
        println("maxerror = ", string(e))
        if length(dc) > 1
            println("wellconditioning_numerator = ",
                    string(wellcond(nc, lo, hi)))
            println("wellconditioning_denominator = ",
                    string(wellcond(dc, lo, hi)))
        else
            println("wellconditioning = ", string(wellcond(nc, lo, hi)))
        end
        print("function = ", functext)
    else
        # Just print the text people will want to paste into their code
        print(functext)
    end
end

# ----------------------------------------------------------------------
# Top-level code: parse the argument list and decide what to do.

what_to_do = main

doing_opts = true
argwords = array1d(String, 0)
for arg = ARGS
    global doing_opts, what_to_do, argwords
    global full_output, array_format, xvarname, floatsuffix, epsbits
    if doing_opts && startswith(arg, "-")
        if arg == "--"
            doing_opts = false
        elseif arg == "--help"
            what_to_do = help
        elseif arg == "--test"
            what_to_do = test
        elseif arg == "--full"
            full_output = true
        elseif arg == "--array"
            array_format = true
        elseif startswith(arg, "--debug=")
            enable_debug(arg[length("--debug=")+1:end])
        elseif startswith(arg, "--variable=")
            xvarname = arg[length("--variable=")+1:end]
        elseif startswith(arg, "--suffix=")
            floatsuffix = arg[length("--suffix=")+1:end]
        elseif startswith(arg, "--bits=")
            epsbits = parse(Int,arg[length("--bits=")+1:end])
        elseif startswith(arg, "--bigfloatbits=")
            # setprecision() replaced set_bigfloat_precision() in Julia 0.5.
            if VERSION >= v"0.5.0"
                setprecision(BigFloat,
                             parse(Int,arg[length("--bigfloatbits=")+1:end]))
            else
                set_bigfloat_precision(
                    parse(Int,arg[length("--bigfloatbits=")+1:end]))
            end
        elseif startswith(arg, "--pre=")
            push!(preliminary_commands, arg[length("--pre=")+1:end])
        else
            error("unrecognised option: ", arg)
        end
    else
        push!(argwords, arg)
    end
end

what_to_do()