diff --git a/jforests/src/main/java/edu/uci/jforests/applications/RankingApp.java b/jforests/src/main/java/edu/uci/jforests/applications/RankingApp.java
index 2b89fa3..a6a919c 100644
--- a/jforests/src/main/java/edu/uci/jforests/applications/RankingApp.java
+++ b/jforests/src/main/java/edu/uci/jforests/applications/RankingApp.java
@@ -27,7 +27,11 @@
 import edu.uci.jforests.dataset.RankingDataset;
 import edu.uci.jforests.dataset.RankingDatasetLoader;
 import edu.uci.jforests.eval.EvaluationMetric;
+import edu.uci.jforests.eval.ranking.MAPEval;
 import edu.uci.jforests.eval.ranking.NDCGEval;
+import edu.uci.jforests.eval.ranking.TRiskAwareFAROEval;
+import edu.uci.jforests.eval.ranking.TRiskAwareSAROEval;
+import edu.uci.jforests.eval.ranking.URiskAwareEval;
 import edu.uci.jforests.learning.LearningModule;
 import edu.uci.jforests.learning.boosting.LambdaMART;
 import edu.uci.jforests.sample.RankingSample;
@@ -108,16 +112,42 @@ protected LearningModule getLearningModule(String name) throws Exception {
             learner.init(configHolder, (RankingDataset) trainDataset, maxTrainInstances,
                     (validDataset != null ? validDataset.numInstances : trainDataset.numInstances), evaluationMetric);
             return learner;
-        } else {
+        }
+        else {
             return super.getLearningModule(name);
         }
     }
 
     @Override
     protected EvaluationMetric getEvaluationMetric(String name) throws Exception {
+        if (name.startsWith("URiskAwareEval:"))
+        {
+            final String[] parts = name.split(":");
+            final double ALPHA = Double.parseDouble(parts[1]);
+            final String parentMeasure = parts[2];
+            return new URiskAwareEval(this.getEvaluationMetric(parentMeasure), ALPHA);
+        }
+        if (name.startsWith("TRiskAwareEvalSARO:") || name.startsWith("TRiskAwareSAROEval:"))
+        {
+            final String[] parts = name.split(":");
+            final double ALPHA = Double.parseDouble(parts[1]);
+            final String parentMeasure = parts[2];
+            return new TRiskAwareSAROEval(this.getEvaluationMetric(parentMeasure), ALPHA);
+        }
+        if (name.startsWith("TRiskAwareEvalFARO:") || name.startsWith("TRiskAwareFAROEval:"))
+        {
+            final String[] parts = name.split(":");
+            final double ALPHA = Double.parseDouble(parts[1]);
+            final String parentMeasure = parts[2];
+            return new TRiskAwareFAROEval(this.getEvaluationMetric(parentMeasure), ALPHA);
+        }
         if (name.equals("NDCG")) {
             return new NDCGEval(maxDocsPerQuery, ((RankingTrainingConfig) trainingConfig).validNDCGTruncation);
         }
+        if (name.equals("MAP")) {
+            return new MAPEval(maxDocsPerQuery);
+        }
+
         return super.getEvaluationMetric(name);
     }
diff --git a/jforests/src/main/java/edu/uci/jforests/eval/EvaluationMetric.java b/jforests/src/main/java/edu/uci/jforests/eval/EvaluationMetric.java
index a1f7b2f..306c2f8 100644
--- a/jforests/src/main/java/edu/uci/jforests/eval/EvaluationMetric.java
+++ b/jforests/src/main/java/edu/uci/jforests/eval/EvaluationMetric.java
@@ -31,6 +31,10 @@ public EvaluationMetric(boolean isLargerBetter) {
         this.isLargerBetter = isLargerBetter;
     }
 
+    public boolean largerIsBetter() {
+        return isLargerBetter;
+    }
+
     public abstract double measure(double[] predictions, Sample sample) throws Exception;
 
     public boolean isFirstBetter(double first, double second, double tolerance) {
diff --git a/jforests/src/main/java/edu/uci/jforests/eval/ranking/TRiskAwareFAROEval.java b/jforests/src/main/java/edu/uci/jforests/eval/ranking/TRiskAwareFAROEval.java
new file mode 100644
index 0000000..b79fec2
--- /dev/null
+++ b/jforests/src/main/java/edu/uci/jforests/eval/ranking/TRiskAwareFAROEval.java
@@ -0,0 +1,65 @@
+package edu.uci.jforests.eval.ranking;
+
+import edu.uci.jforests.eval.EvaluationMetric;
+import edu.uci.jforests.util.CDF_Normal;
+
+/** Implements the TRisk risk-sensitive optimisation metric called FARO, as defined by Dincer et al.
+ * See Taner Dincer, Craig Macdonald and Iadh Ounis, Hypothesis Testing for Risk-Sensitive Evaluation
+ * of Retrieval Systems, SIGIR 2014.
+ * Implementation Details:
+ * It is assumed that the natural ordering of documents for each query defines
+ * the baseline.
+ * @author Craig Macdonald, University of Glasgow; Taner Dincer, Mugla University.
+ */
+public class TRiskAwareFAROEval extends TRiskAwareSAROEval {
+
+    public TRiskAwareFAROEval(EvaluationMetric _parent, double alpha) {
+        super(_parent, alpha);
+    }
+
+    class FAROSwapScorer extends SAROSwapScorer
+    {
+        public FAROSwapScorer(double[] targets, int[] boundaries, int trunc,
+                int[][] labelCounts, double _alpha, SwapScorer _parent) {
+            super(targets, boundaries, trunc, labelCounts, _alpha, _parent);
+        }
+
+        @Override
+        public double getDelta(int queryIndex, int betterIdx, int rank_i, int worseIdx, int rank_j)
+        {
+            //get the change in NDCG
+            final double delta_M = parentSwap.getDelta(queryIndex, betterIdx, rank_i, worseIdx, rank_j);
+
+            final double M_m = modelEval[queryIndex];
+            final double M_b = baselineEval[queryIndex];
+
+            // Score difference
+            double d_i = M_m - M_b;
+
+            final double TRisk = d_i / currPairedSTD;
+
+            // beta asymptotically ranges between a value as small as 0 and a value as large as alpha,
+            // proportionally to the level of risk committed by the current topic.
+            // This version follows its own way of weighing a given delta.
+            // NB: in Dincer et al, beta is called \alpha'
+            final double beta = (1 - CDF_Normal.normp(TRisk)) * alpha;
+
+            final double delta_T = (1 + beta) * delta_M;
+
+            return delta_T;
+        }
+
+    }
+
+    @Override
+    public SwapScorer getSwapScorer(double[] targets, int[] boundaries,
+            int trunc, int[][] labelCounts) throws Exception
+    {
+        final SwapScorer parentMeasure = ((RankingEvaluationMetric) parent).getSwapScorer(targets, boundaries, trunc, labelCounts);
+        return new FAROSwapScorer(targets, boundaries, trunc, labelCounts,
+                ALPHA,
+                parentMeasure);
+    }
+}
diff --git a/jforests/src/main/java/edu/uci/jforests/eval/ranking/TRiskAwareSAROEval.java b/jforests/src/main/java/edu/uci/jforests/eval/ranking/TRiskAwareSAROEval.java
new file mode 100644
index 0000000..24a22b6
--- /dev/null
+++ b/jforests/src/main/java/edu/uci/jforests/eval/ranking/TRiskAwareSAROEval.java
@@ -0,0 +1,201 @@
+package edu.uci.jforests.eval.ranking;
+
+//import static org.junit.Assert.assertEquals;
+//import org.junit.Test;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+
+import org.junit.Test;
+
+import edu.uci.jforests.eval.EvaluationMetric;
+import edu.uci.jforests.sample.RankingSample;
+import edu.uci.jforests.sample.Sample;
+import edu.uci.jforests.util.CDF_Normal;
+import edu.uci.jforests.util.MathUtil;
+import edu.uci.jforests.util.concurrency.BlockingThreadPoolExecutor;
+
+/** Implements the TRisk risk-sensitive optimisation metric called SARO, as defined by Dincer et al.
+ * See Taner Dincer, Craig Macdonald and Iadh Ounis, Hypothesis Testing for Risk-Sensitive Evaluation
+ * of Retrieval Systems, SIGIR 2014.
+ * Implementation Details:
+ * It is assumed that the natural ordering of documents for each query defines
+ * the baseline.
+ * @author Craig Macdonald, University of Glasgow; Taner Dincer, Mugla University.
+ */
+public class TRiskAwareSAROEval extends URiskAwareEval {
+
+    public TRiskAwareSAROEval(EvaluationMetric _parent, double alpha) {
+        super(_parent, alpha);
+    }
+
+    class SAROSwapScorer extends URiskSwapScorer
+    {
+        double currPairedSTD = 0.0d;
+        double c;
+        double baselineMean;
+
+        public SAROSwapScorer(double[] targets, int[] boundaries, int trunc,
+                int[][] labelCounts, double _alpha, SwapScorer _parent) {
+            super(targets, boundaries, trunc, labelCounts, _alpha, _parent);
+            c = ((double) boundaries.length - 1);
+        }
+
+        @Override
+        /* Basic Version */
+        public double getDelta(int queryIndex, int betterIdx, int rank_i, int worseIdx, int rank_j)
+        {
+            //get the change in NDCG
+            final double delta_M = parentSwap.getDelta(queryIndex, betterIdx, rank_i, worseIdx, rank_j);
+
+            final double M_m = modelEval[queryIndex];
+            final double M_b = baselineEval[queryIndex];
+            final double rel_i = targets[betterIdx];
+            final double rel_j = targets[worseIdx];
+
+            // Score difference
+            double d_i = M_m - M_b;
+
+            final double TRisk = d_i / currPairedSTD;
+
+            // beta asymptotically ranges between a value as small as 0 and a value as large as alpha,
+            // proportionally to the level of risk committed by the current topic.
+            // This version follows the way that URisk weighs a given delta.
+            // NB: in Dincer et al, beta is called \alpha'
+            final double beta = (1 - CDF_Normal.normp(TRisk)) * alpha;
+
+            final double delta_T;
+
+            //Scenarios are as defined by Wang et al, SIGIR 2012.
+            //Scenario A
+            if (M_m <= M_b)
+            {
+                //case A1
+                if (rel_i > rel_j && rank_i < rank_j)
+                {
+                    delta_T = (1.0d + beta) * delta_M;
+                }
+                //case A2
+                else
+                {
+                    if (M_b > M_m + delta_M)
+                    {
+                        delta_T = (1.0d + beta) * delta_M;
+                    }
+                    else
+                    {
+                        delta_T = beta * (M_b - M_m) + delta_M;
+                    }
+                }
+            }
+            else //Scenario B
+            {
+                if (rel_i > rel_j && rank_i < rank_j)
+                {
+                    //case B1
+                    if (M_b > M_m - Math.abs(delta_M))
+                    {
+                        delta_T = beta * (M_m - M_b) - (1 + beta) * Math.abs(delta_M);
+                    }
+                    else
+                    {
+                        delta_T = delta_M;
+                    }
+                }
+                else
+                {
+                    delta_T = delta_M;
+                }
+            }
+
+            return delta_T;
+        }
+
+        @Override
+        public void setCurrentIterationEvaluation(int iteration, double[] nDCG) {
+            super.setCurrentIterationEvaluation(iteration, nDCG);
+            if (iteration == 0)
+            {
+                double[] params = getEstimates(baselineEval, modelEval, this.alpha);
+                currPairedSTD = Math.sqrt(params[1]);
+                System.err.println("Iteration 0 Paired STD=" + currPairedSTD);
+                baselineMean = MathUtil.getAvg(baselineEval);
+                System.err.println("Iteration 0 NDCG=" + Arrays.toString(nDCG));
+            }
+            else
+            {
+                final double modelMean = MathUtil.getAvg(nDCG);
+//                if (modelMean < baselineMean)
+//                {
+//                    /* This does not align with the paper. std is re-evaluated when modelMean < baselineMean. Why? */
+//                    double[] params = getEstimates(baselineEval, modelEval, this.alpha);
+//                    currPairedSTD = Math.sqrt(params[1]);
+//                    System.err.println("Iteration " + iteration + " Paired STD=" + currPairedSTD);
+//                }
+//                else
+//                {
+//                    System.err.println("Iteration " + iteration + " Paired STD unchanged " + currPairedSTD);
+//                }
+                System.err.println("Iteration " + iteration + " NDCG=" + Arrays.toString(nDCG));
+            }
+        }
+    }
+
+    /** returns double[] params where params[0] = URisk; params[1] = PairedVar. */
+    public static double[] getEstimates(final double[] baselinePerQuery, final double[] perQuery, final double ALPHA)
+    {
+        final double c = baselinePerQuery.length;
+        double sum = 0D;
+        double SSQR = 0D;
+        double d_i = 0D;
+
+        for (int i = 0; i < c; i++)
+        {
+            if (perQuery[i] > baselinePerQuery[i])
+                d_i = perQuery[i] - baselinePerQuery[i];
+            else
+                d_i = (1 + ALPHA) * (perQuery[i] - baselinePerQuery[i]);
+            sum += d_i;
+            SSQR += d_i * d_i;
+        }
+
+        final double URisk = sum / c;
+        final double SQRS = sum * sum;
+        final double pairedVar = SSQR == SQRS ? 0 : (SSQR - (SQRS / c)) / (c - 1);
+        return new double[] {URisk, pairedVar};
+    }
+
+    /* Returns TRisk = Math.sqrt(c / PairedVar) * URisk */
+    public static double T_measure(final double[] baselinePerQuery, final double[] perQuery, final double ALPHA)
+    {
+        final double c = baselinePerQuery.length;
+
+        double[] params = getEstimates(baselinePerQuery, perQuery, ALPHA);
+
+        return params[1] == 0D ? 0 : Math.sqrt(c / params[1]) * params[0];
+    }
+
+    @Override
+    public double measure(double[] predictions, Sample sample) throws Exception {
+
+        final RankingSample rankingSample = (RankingSample) sample;
+        assert rankingSample.queryBoundaries.length - 1 == rankingSample.numQueries;
+        final double[] naturalOrder = computeNaturalOrderScores(predictions.length, rankingSample.queryBoundaries);
+
+        double[] baselinePerQuery = ((RankingEvaluationMetric) parent).measureByQuery(naturalOrder, sample);
+        double[] perQuery = ((RankingEvaluationMetric) parent).measureByQuery(predictions, sample);
+        return T_measure(baselinePerQuery, perQuery, this.ALPHA);
+    }
+
+    @Override
+    public SwapScorer getSwapScorer(double[] targets, int[] boundaries,
+            int trunc, int[][] labelCounts) throws Exception
+    {
+        final SwapScorer parentMeasure = ((RankingEvaluationMetric) parent).getSwapScorer(targets, boundaries, trunc, labelCounts);
+        return new SAROSwapScorer(targets, boundaries, trunc, labelCounts,
+                ALPHA,
+                parentMeasure);
+    }
+
+}
diff --git a/jforests/src/main/java/edu/uci/jforests/eval/ranking/URiskAwareEval.java b/jforests/src/main/java/edu/uci/jforests/eval/ranking/URiskAwareEval.java
new file mode 100644
index 0000000..da38308
--- /dev/null
+++ b/jforests/src/main/java/edu/uci/jforests/eval/ranking/URiskAwareEval.java
@@ -0,0 +1,256 @@
+package edu.uci.jforests.eval.ranking;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+
+import org.junit.Test;
+
+import edu.uci.jforests.eval.EvaluationMetric;
+import edu.uci.jforests.sample.RankingSample;
+import edu.uci.jforests.sample.Sample;
+import edu.uci.jforests.util.MathUtil;
+import edu.uci.jforests.util.concurrency.BlockingThreadPoolExecutor;
+
+/** Implements the URisk risk-sensitive evaluation metric, as defined by Wang et al.
+ * See Lidan Wang, Paul N. Bennett and Kevyn Collins-Thompson, SIGIR 2012.
+ * Implementation Details:
+ * It is assumed that the natural ordering of documents for each query defines
+ * the baseline.
+ * @author Craig Macdonald, University of Glasgow
+ */
+public class URiskAwareEval extends RankingEvaluationMetric {
+
+ EvaluationMetric parent;
+ final double ALPHA;
+
+ static class URiskSwapScorer extends SwapScorer
+ {
+ /** alpha value in the Utility measure */
+ double alpha;
+ /** swapscorer of the parent metric */
+ SwapScorer parentSwap;
+ /** M_m: the per-query performance of the current model
+ * BEFORE any documents are swapped
+ */
+ double[] modelEval;
+ /** M_b: the per-query performance of the baseline ranking. */
+ double[] baselineEval;
+
+
+ public URiskSwapScorer(
+ double[] targets,
+ int[] boundaries,
+ int trunc,
+ int[][] labelCounts,
+ double _alpha,
+ SwapScorer _parent
+ )
+ {
+ super(targets, boundaries, trunc, labelCounts);
+ this.alpha = _alpha;
+ this.parentSwap = _parent;
+ }
+
+ @Override
+ public void setCurrentIterationEvaluation(int iteration, double[] nDCG) {
+ final double meanNDCG = MathUtil.getAvg(nDCG);
+ if (iteration == 0)
+ {
+ System.err.println("Baseline in iteration 0 has peformance " + meanNDCG);
+ baselineEval = nDCG;
+ modelEval = new double[baselineEval.length];
+ }
+ else
+ {
+ System.err.println("Model in iteration "+iteration+" has peformance " + meanNDCG);
+ modelEval = nDCG;
+ }
+ System.err.println("Iteration " + iteration + " NDCG=" + Arrays.toString(nDCG));
+ }
+
+ @Override
+ public double getDelta(int queryIndex, int betterIdx, int rank_i,
+ int worseIdx, int rank_j) {
+
+ final double M_m = modelEval[queryIndex];
+ final double M_b = baselineEval[queryIndex];
+
+ //This follows the notation of Section 4.3.2 in Wang et al. to the fullest extent possible.
+ //Assertions and if conditions are used to ensure that the conditions described in the proof
+ //actually hold.
+
+ //change in effectiveness
+ final double delta_M = parentSwap.getDelta(queryIndex, betterIdx, rank_i, worseIdx, rank_j);
+
+ final double rel_i = targets[betterIdx];
+ final double rel_j = targets[worseIdx];
+
+
+ //Our LambdaMART implementation only calls getDelta when document i is of higher relevance than document j.
+ assert rel_i >= rel_j;
+ //The upshot is that not all scenarios in the proof can ever occur. We now check that the conditions
+ //are as expected.
+
+ //hence, delta_M should always be zero or positive if i is currently ranked later than j. This would be a "good" swap
+ if (rank_i > rank_j)
+ {
+ assert delta_M >= 0 : "rank_i=" + rank_i + " rank_j="+rank_j + " delta_M="+delta_M;
+ }
+ //hence, delta_M should always be zero or negative if i is currently ranked earlier than j. This would be a "bad" swap
+ if (rank_i < rank_j)
+ {
+ assert delta_M <= 0 : "rank_i=" + rank_i + " rank_j="+rank_j + " delta_M="+delta_M;
+ }
+
+ //change in tradeoff
+ final double delta_T;
+
+ //Scenario A
+ if (M_m <= M_b)
+ {
+ //case A1
+ if (rel_i > rel_j && rank_i < rank_j)
+ {
+ assert delta_M < 0;
+ delta_T = (1.0d + alpha) * delta_M;
+ assert delta_T < 0 : "M_b="+M_b+" M_m="+M_m + " delta_M=" + delta_M + " => delta_T=" + delta_T ;
+ }
+ //case A2
+ else
+ {
+ assert rel_i > rel_j && rank_i > rank_j : "M_b="+M_b+" M_m="+M_m + " delta_M=" + delta_M ;
+ assert delta_M >= 0 : "M_b="+M_b+" M_m="+M_m + " delta_M=" + delta_M
+ + " rel_i="+rel_i+" rel_j="+rel_j+" rank_i="+rank_i+" rank_j="+rank_j;
+ if (M_b > M_m + delta_M)
+ {
+ delta_T = (1.0d + alpha) * delta_M;
+ }
+ else
+ {
+ assert M_b <= M_m + delta_M : "M_b="+M_b+" M_m="+M_m + " delta_M=" + delta_M ;
+ delta_T = alpha * (M_b - M_m) + delta_M;
+ }
+ assert delta_T > 0 : "M_b="+M_b+" M_m="+M_m + " delta_M=" + delta_M
+ + " rel_i="+rel_i+" rel_j="+rel_j+" rank_i="+rank_i+" rank_j="+rank_j+" => delta_T=" + delta_T ;
+ }
+ }
+ else //Scenario B
+ {
+ assert M_m > M_b : "M_b="+M_b+" M_m="+M_m + " delta_M=" + delta_M;
+ if( rel_i > rel_j && rank_i < rank_j )
+ {
+ assert delta_M <= 0 : "rank_i=" + rank_i + " rank_j="+rank_j + " delta_M="+delta_M;
+ //case B1
+ if (M_b > M_m - Math.abs(delta_M))
+ {
+ delta_T = alpha * (M_m - M_b) - (1 + alpha) * Math.abs(delta_M);
+ }
+ else
+ {
+ assert M_b <= M_m - Math.abs(delta_M) : "M_b="+M_b+" M_m="+M_m + " delta_M=" + delta_M ;
+ delta_T = delta_M;
+ }
+ assert delta_T < 0 : "M_b="+M_b+" M_m="+M_m + " delta_M=" + delta_M
+ + " rel_i="+rel_i+" rel_j="+rel_j+" rank_i="+rank_i+" rank_j="+rank_j + " => delta_T=" + delta_T ;
+ }
+ else
+ {
+ assert rel_i > rel_j && rank_i > rank_j;
+ delta_T = delta_M;
+ assert delta_T > 0;
+ }
+ }
+
+ return delta_T;
+ }
+
+ }
+
+ public URiskAwareEval(EvaluationMetric _parent, double alpha)
+ {
+ super(_parent.largerIsBetter());
+ assert _parent.largerIsBetter();
+ parent = _parent;
+ ALPHA = alpha;
+ }
+
+ @Override @SuppressWarnings("unused")
+ public double measure(double[] predictions, Sample sample) throws Exception {
+
+ final RankingSample rankingSample = (RankingSample)sample;
+ assert rankingSample.queryBoundaries.length -1 == rankingSample.numQueries;
+ final double[] naturalOrder = computeNaturalOrderScores(predictions.length, rankingSample.queryBoundaries);
+
+ double[] baselinePerQuery = ((RankingEvaluationMetric) parent).measureByQuery(naturalOrder, sample);
+ double[] perQuery = ((RankingEvaluationMetric) parent).measureByQuery(predictions, sample);
+
+ double T1 = 0, T2 = 0;
+
+ final int queryLength = perQuery.length;
+ double F_reward = 0.0d;
+ double F_risk = 0.0d;
+ for(int i=0;i
+*It is based upon algorithm 5666 for the error function, from:
+* Hart, J.F. et al, 'Computer Approximations', Wiley 1968
+*
+*The FORTRAN programmer was Alan Miller. The documentation
+*in the FORTRAN code claims that the function is "accurate
+*to 1.e-15."
+*Steve Verrill
+*translated the FORTRAN code (the March 30, 1986 version)
+*into Java. This translation was performed on January 10, 2001.
+*
+*@param z The method returns the value of the normal
+* cumulative distribution function at z.
+*
+*@version .5 --- January 10, 2001
+*
+*/
+
+
+/*
+
+Here is a copy of the documentation in the FORTRAN code:
+
+ SUBROUTINE NORMP(Z, P, Q, PDF)
+C
+C Normal distribution probabilities accurate to 1.e-15.
+C Z = no. of standard deviations from the mean.
+C P, Q = probabilities to the left & right of Z. P + Q = 1.
+C PDF = the probability density.
+C
+C Based upon algorithm 5666 for the error function, from:
+C Hart, J.F. et al, 'Computer Approximations', Wiley 1968
+C
+C Programmer: Alan Miller
+C
+C Latest revision - 30 March 1986
+C
+
+*/
+
+ public static double normp(double z) {
+
+ double zabs;
+ double p;
+ double expntl,pdf;
+
+ final double p0 = 220.2068679123761;
+ final double p1 = 221.2135961699311;
+ final double p2 = 112.0792914978709;
+ final double p3 = 33.91286607838300;
+ final double p4 = 6.373962203531650;
+ final double p5 = .7003830644436881;
+ final double p6 = .3526249659989109E-01;
+
+ final double q0 = 440.4137358247522;
+ final double q1 = 793.8265125199484;
+ final double q2 = 637.3336333788311;
+ final double q3 = 296.5642487796737;
+ final double q4 = 86.78073220294608;
+ final double q5 = 16.06417757920695;
+ final double q6 = 1.755667163182642;
+ final double q7 = .8838834764831844E-1;
+
+ final double cutoff = 7.071;
+ final double root2pi = 2.506628274631001;
+
+ zabs = Math.abs(z);
+
+// |z| > 37
+
+ if (z > 37.0) {
+
+ p = 1.0;
+
+ return p;
+
+ }
+
+ if (z < -37.0) {
+
+ p = 0.0;
+
+ return p;
+
+ }
+
+// |z| <= 37.
+
+ expntl = Math.exp(-.5*zabs*zabs);
+
+ pdf = expntl/root2pi;
+
+// |z| < cutoff = 10/sqrt(2).
+
+ if (zabs < cutoff) {
+
+ p = expntl*((((((p6*zabs + p5)*zabs + p4)*zabs + p3)*zabs +
+ p2)*zabs + p1)*zabs + p0)/(((((((q7*zabs + q6)*zabs +
+ q5)*zabs + q4)*zabs + q3)*zabs + q2)*zabs + q1)*zabs +
+ q0);
+
+ } else {
+
+ p = pdf/(zabs + 1.0/(zabs + 2.0/(zabs + 3.0/(zabs + 4.0/
+ (zabs + 0.65)))));
+
+ }
+
+ if (z < 0.0) {
+
+ return p;
+
+ } else {
+
+ p = 1.0 - p;
+
+ return p;
+
+ }
+
+ }
+
+ public static void main(String[] arg) {
+ for (int i = 0; i <= 60; i++)
+ System.out.println(CDF_Normal.normp(-3 + (0.1 * i)) + ",");
+ System.out.println("");
+
+ }
+}
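
Usage note: RankingApp.getEvaluationMetric above selects the new metrics by name, in the
format <MetricName>:<alpha>:<parentMeasure>, e.g. "URiskAwareEval:5:NDCG",
"TRiskAwareSAROEval:1:NDCG" or "TRiskAwareFAROEval:1:NDCG", plus the plain "MAP" name.
A minimal sketch of the equivalent direct construction follows; the example package and class
name and the NDCG arguments (10, 10) are illustrative assumptions, not values taken from the patch.

    package edu.uci.jforests.examples;

    import edu.uci.jforests.eval.EvaluationMetric;
    import edu.uci.jforests.eval.ranking.NDCGEval;
    import edu.uci.jforests.eval.ranking.TRiskAwareFAROEval;
    import edu.uci.jforests.eval.ranking.TRiskAwareSAROEval;
    import edu.uci.jforests.eval.ranking.URiskAwareEval;

    public class RiskMetricExample {
        public static void main(String[] args) throws Exception {
            // NDCG over at most 10 documents per query, truncated at 10 (assumed values)
            EvaluationMetric ndcg = new NDCGEval(10, 10);
            // Mirrors the name "URiskAwareEval:5:NDCG" (alpha = 5), modulo the configured NDCG truncation
            EvaluationMetric urisk = new URiskAwareEval(ndcg, 5.0);
            // Mirrors "TRiskAwareSAROEval:1:NDCG" and "TRiskAwareFAROEval:1:NDCG"
            EvaluationMetric saro = new TRiskAwareSAROEval(ndcg, 1.0);
            EvaluationMetric faro = new TRiskAwareFAROEval(ndcg, 1.0);
            // All three wrappers report the same orientation as the parent metric
            System.out.println(urisk.largerIsBetter() + " " + saro.largerIsBetter() + " " + faro.largerIsBetter());
        }
    }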
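
A short worked sketch of the URisk/TRisk arithmetic implemented by TRiskAwareSAROEval.getEstimates,
TRiskAwareSAROEval.T_measure and CDF_Normal.normp above. The per-query scores below are invented
for illustration, as are the example package and class names.

    package edu.uci.jforests.examples;

    import edu.uci.jforests.eval.ranking.TRiskAwareSAROEval;
    import edu.uci.jforests.util.CDF_Normal;

    public class TRiskArithmeticExample {
        public static void main(String[] args) {
            final double alpha = 1.0;
            // Per-query scores of the baseline (natural ordering) and of the current model
            final double[] baseline = {0.40, 0.55, 0.30, 0.70};
            final double[] model    = {0.45, 0.50, 0.35, 0.60};

            // params[0] = URisk: mean per-query difference, with losses weighted by (1 + alpha).
            // params[1] = the paired variance of those differences.
            final double[] params = TRiskAwareSAROEval.getEstimates(baseline, model, alpha);

            // TRisk = sqrt(c / pairedVar) * URisk, i.e. a paired t statistic over the differences.
            final double tRisk = TRiskAwareSAROEval.T_measure(baseline, model, alpha);

            // Inside SAROSwapScorer/FAROSwapScorer a per-query analogue is used:
            // TRisk_q = (M_m - M_b) / pairedSTD, and beta = (1 - Phi(TRisk_q)) * alpha.
            final double pairedSTD = Math.sqrt(params[1]);
            final double tRiskQ0 = (model[0] - baseline[0]) / pairedSTD;
            final double betaQ0 = (1 - CDF_Normal.normp(tRiskQ0)) * alpha;

            System.out.println("URisk=" + params[0] + " pairedVar=" + params[1]
                    + " TRisk=" + tRisk + " beta(q0)=" + betaQ0);
        }
    }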