diff --git a/src/lib/classify.ml b/src/lib/classify.ml
index cc7bfcc..22d6069 100644
--- a/src/lib/classify.ml
+++ b/src/lib/classify.ml
@@ -60,7 +60,7 @@ module type Classifier_intf = sig
   val eval : t -> feature -> clas probabilities

   type samples = (clas * feature) list
-  val estimate : ?spec:spec -> ?classes:clas list -> samples -> t
+  val estimate : ?opt:opt -> ?classes:clas list -> samples -> t
 end

 module type Generative_intf = sig
@@ -136,7 +136,7 @@ let estimate_naive_bayes modulename (type c) init update incorporate
 module BinomialNaiveBayes(Data: Dummy_encoded_data_intf)
   : (Generative_intf with type feature = Data.feature
                       and type clas = Data.clas
-                      and type spec = binomial_spec)
+                      and type opt = binomial_spec)
   = struct

   type feature = Data.feature
@@ -174,12 +174,12 @@ module BinomialNaiveBayes(Data: Dummy_encoded_data_intf)
     in
     eval_naive_bayes ~to_prior ~to_likelihood nb.table

-  type spec = binomial_spec
+  type opt = binomial_spec
   let default = { smoothing = 0.0; bernoulli = false }

   module Cm = Map.Make(struct type t = clas let compare = compare end)

-  let estimate ?(spec=default) ?classes data =
+  let estimate ?(opt=default) ?classes data =
     let aa = Data.size + 1 in
     let init _cls = Array.make aa 0 in
     let update arr ftr =
@@ -190,7 +190,7 @@ module BinomialNaiveBayes(Data: Dummy_encoded_data_intf)
       arr
     in
     let incorporate all num_classes totalf =
-      let to_prob = smoothing_to_prob spec.smoothing in
+      let to_prob = smoothing_to_prob opt.smoothing in
       List.map all ~f:(fun (cl, attr_count) ->
         let prior_count = float attr_count.(Data.size) in
         let likelihood =
@@ -206,7 +206,7 @@ module BinomialNaiveBayes(Data: Dummy_encoded_data_intf)
       estimate_naive_bayes "BinomialNaiveBayes" init update incorporate
         (module Cm) ?classes data
     in
-    {table ; e_bernoulli = spec.bernoulli}
+    {table ; e_bernoulli = opt.bernoulli}

   let class_probabilities nb cls =
     let arr = List.assoc cls nb.table in
@@ -235,7 +235,7 @@ type smoothing = float
 module CategoricalNaiveBayes(Data: Category_encoded_data_intf)
   : (Generative_intf with type feature = Data.feature
                       and type clas = Data.clas
-                      and type spec = smoothing)
+                      and type opt = smoothing)
   = struct

@@ -274,12 +274,12 @@ module CategoricalNaiveBayes(Data: Category_encoded_data_intf)
     in
     eval_naive_bayes ~to_prior ~to_likelihood table

-  type spec = smoothing
+  type opt = smoothing
   let default = 0.0

   module Cm = Map.Make(struct type t = clas let compare = compare end)

-  let estimate ?(spec=default) =
+  let estimate ?(opt=default) =
     let init _ = (0, Array.map (fun i -> Array.make i 0) Data.encoding_sizes) in
     let update (c, arr) ftr =
       let ftr_arr = safe_encoding ftr in
@@ -287,7 +287,7 @@ module CategoricalNaiveBayes(Data: Category_encoded_data_intf)
       (c + 1, arr)
     in
     let incorporate all num_classes totalf =
-      let to_prob = smoothing_to_prob spec in
+      let to_prob = smoothing_to_prob opt in
       List.map all ~f:(fun (cl, (class_count, attr_count)) ->
         let prior = to_prob (float class_count) totalf (float num_classes) in
         let likelihood =
@@ -322,7 +322,7 @@ let to_safe_encoding_size_checked interfacename size encoding f =
 module GaussianNaiveBayes(Data: Continuous_encoded_data_intf)
   : (Generative_intf with type feature = Data.feature
                       and type clas = Data.clas
-                      and type spec = unit)
+                      and type opt = unit)
   = struct

@@ -353,13 +353,13 @@ module GaussianNaiveBayes(Data: Continuous_encoded_data_intf)
     in
     eval_naive_bayes ~to_prior ~to_likelihood table

-  type spec = unit
+  type opt = unit
   let default = ()

   module Cm = Map.Make(struct type t = clas let compare = compare end)

-  let estimate ?(spec=default) =
-    ignore spec;
+  let estimate ?(opt=default) =
+    ignore opt;
     let init _c = (0, Array.make Data.size Running.empty) in
     let update (c, rs_arr) ftr =
       let attr = safe_encoding ftr in
@@ -396,7 +396,7 @@ module LrCommon(Data: Continuous_encoded_data_intf) = struct

   type samples = (clas * feature) list

-  type spec = log_reg_spec
+  type opt = log_reg_spec

   let default = { lambda = 1e-4 ; tolerance = 1e4 }

@@ -408,7 +408,7 @@ module LrCommon(Data: Continuous_encoded_data_intf) = struct
   let copy1 arr = Array.init (Data.size + 1) (function | 0 -> 1. | i -> arr.(i - 1))

   (* map classes to [1;2 ... 3], convert features to matrix and run Softmax *)
-  let estimate ~method_name ~class_bound ~to_t ?(spec=default) ?(classes=[]) data =
+  let estimate ~method_name ~class_bound ~to_t ?(opt=default) ?(classes=[]) data =
     let class_bound =
       match class_bound with
       | None -> fun n -> n
@@ -444,8 +444,8 @@ module LrCommon(Data: Continuous_encoded_data_intf) = struct
     in
     let weights =
       Softmax_regression.regress
-        ~lambda:spec.lambda
-        ~tolerance:spec.tolerance
+        ~lambda:opt.lambda
+        ~tolerance:opt.tolerance
         ftrs classes
     in
     let sortedc =
@@ -461,7 +461,7 @@ module LogisticRegression(Data: Continuous_encoded_data_intf) :
   sig
     include Classifier_intf with type feature = Data.feature
                              and type clas = Data.clas
-                             and type spec = log_reg_spec
+                             and type opt = log_reg_spec

     val coefficients : t -> float array

@@ -504,7 +504,7 @@ module MulticlassLogisticRegression(Data: Continuous_encoded_data_intf) :
   sig
     include Classifier_intf with type feature = Data.feature
                              and type clas = Data.clas
-                             and type spec = log_reg_spec
+                             and type opt = log_reg_spec

     val coefficients : t -> float array array
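Call sites change mechanically from ~spec to ~opt. A minimal sketch against the renamed classify.ml interface; the encoded-data module D, the training list data, and the feature ftr are placeholders rather than part of this patch:

    open Classify
    (* D must satisfy Continuous_encoded_data_intf. *)
    module MLR = MulticlassLogisticRegression(D)

    (* Override only the regularization strength; tolerance keeps its default. *)
    let model = MLR.estimate ~opt:{ MLR.default with lambda = 1e-3 } data
    let probs = MLR.eval model ftr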
diff --git a/src/lib/classify.mli b/src/lib/classify.mli
index 2b0fb32..4088596 100644
--- a/src/lib/classify.mli
+++ b/src/lib/classify.mli
@@ -109,7 +109,7 @@ module type Classifier_intf = sig
   (** Representing training data. *)
   type samples = (clas * feature) list

-  (** [estimate spec classes samples] estimates a classifier based upon the
+  (** [estimate opt classes samples] estimates a classifier based upon the
       training [samples].

       [classes] is an optional argument to specify ahead of time the possible
@@ -117,14 +117,14 @@ module type Classifier_intf = sig
       This is useful for models where we know the population domain but may
       not see an example of a training datum for rare cases.

-      [spec] are the optional classifier dependent estimation/evaluation
+      [opt] specifies the optional classifier-dependent estimation/evaluation
       arguments.

       @raise Invalid_argument if [classes] are specified and new ones are
       found in the training [samples].

       @raise Invalid_argument if [samples] is empty. *)
-  val estimate : ?spec:spec -> ?classes:clas list -> samples -> t
+  val estimate : ?opt:opt -> ?classes:clas list -> samples -> t
 end

 (** A generative classifier builds models of the form
@@ -172,7 +172,7 @@ type binomial_spec =
     classifier on data encoded using
     {{!modtype:Dummy_encoded_data_intf}Dummy variables.} *)
 module BinomialNaiveBayes(D: Dummy_encoded_data_intf) :
-  Generative_intf with type spec = binomial_spec
+  Generative_intf with type opt = binomial_spec
                    and type feature = D.feature
                    and type clas = D.clas

@@ -181,7 +181,7 @@ module BinomialNaiveBayes(D: Dummy_encoded_data_intf) :
     classifier on data encoded using
     {{!modtype:Category_encoded_data_intf}Categorical variables.} *)
 module CategoricalNaiveBayes(D: Category_encoded_data_intf) :
-  Generative_intf with type spec = smoothing
+  Generative_intf with type opt = smoothing
                    and type feature = D.feature
                    and type clas = D.clas

@@ -191,7 +191,7 @@ module CategoricalNaiveBayes(D: Category_encoded_data_intf) :
     for each of the quantitative features in the
     {{!modtype:Continuous_encoded_data_intf}encoded data}. *)
 module GaussianNaiveBayes(D: Continuous_encoded_data_intf) :
-  Generative_intf with type spec = unit
+  Generative_intf with type opt = unit
                    and type feature = D.feature
                    and type clas = D.clas

@@ -230,7 +230,7 @@ type log_reg_spec =
 *)
 module LogisticRegression(D: Continuous_encoded_data_intf) :
   sig
-    include Classifier_intf with type spec = log_reg_spec
+    include Classifier_intf with type opt = log_reg_spec
                              and type feature = D.feature
                              and type clas = D.clas

@@ -259,7 +259,7 @@ module LogisticRegression(D: Continuous_encoded_data_intf) :
 *)
 module MulticlassLogisticRegression(D: Continuous_encoded_data_intf) :
   sig
-    include Classifier_intf with type spec = log_reg_spec
+    include Classifier_intf with type opt = log_reg_spec
                              and type feature = D.feature
                              and type clas = D.clas
diff --git a/src/lib/classify.mlt b/src/lib/classify.mlt
index 1e2510a..01de036 100644
--- a/src/lib/classify.mlt
+++ b/src/lib/classify.mlt
@@ -58,7 +58,7 @@ let () =
       let size = 5
     end) in
-  let naiveb = NB.estimate ~spec:{NB.default with bernoulli = true } data in
+  let naiveb = NB.estimate ~opt:{NB.default with bernoulli = true } data in
   let sample = [ `shortbread ; `whiskey; `porridge ] in
   let result = NB.eval naiveb sample in
   let expect =
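For classifiers whose options are a bare value, the update is just the label. A hedged sketch for the categorical case, with D and data again placeholders:

    open Classify
    module CNB = CategoricalNaiveBayes(D)

    (* opt is the smoothing constant (type smoothing = float);
       1.0 corresponds to add-one (Laplace) smoothing. *)
    let model = CNB.estimate ~opt:1.0 data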
diff --git a/src/lib/regression.ml b/src/lib/regression.ml
index bb6242d..fe3f1f6 100644
--- a/src/lib/regression.ml
+++ b/src/lib/regression.ml
@@ -30,7 +30,7 @@ module type Linear_model_intf = sig
   val describe : t -> string

   val eval : t -> input -> float
-  val regress : ?spec:spec -> input array -> resp:float array -> t
+  val regress : ?opt:opt -> input array -> resp:float array -> t

   val residuals : t -> float array
   val coefficients : t -> float array
@@ -63,7 +63,7 @@ module Univariate = struct
     ; inferred_var : float  (* inferred variance of error. *)
     ; s_yy : float          (* sum of diff or resp to mean, TSS. *)
     ; s_xx : float          (* sum of diff of pred to mean. *)
-    ; goodness_of_fit : float option
+    (*; goodness_of_fit : float option *)
     }

   let alpha lrm = lrm.alpha
@@ -82,23 +82,23 @@ module Univariate = struct

   let eval lrm x = lrm.alpha +. lrm.beta *. x

-  type spec = float array
+  type opt = float array

-  let default = [||]
+  let opt ?weights () = match weights with | None -> [||] | Some a -> a

-  let regress ?spec pred ~resp =
+  let default = opt ()
+
+  let regress ?(opt=default) pred ~resp =
     let n = Array.length pred in
-    (* Optional spec argument allows us to specify the individual error
+    (* Optional argument allows us to specify the individual error
        weights on each observation. *)
+    let an = Array.length opt in
     let weights =
-      match spec with
-      | None -> Array.make n 1.0
-      | Some a ->
-          let an = Array.length a in
-          if an <> n then
-            invalidArg "regress: spec length %d <> d predictor size %d" an n
-          else
-            a
+      if an = 0 then Array.make n 1.0
+      else if an <> n then
+        invalidArg "regress: opt length %d <> predictor size %d" an n
+      else
+        opt
     in
     let w_s = Array.sumf weights in
     let sum2 f a1 a2 =
@@ -119,11 +119,11 @@ module Univariate = struct
     let srs = sum2 (fun w_i r -> w_i *. r *. r) weights rss in
     (* degress of freedom: one for the constant and one for beta *)
     let dgf = float (n - 2) in
-    let q =
-      match spec with
+    (*let q =
+      match opt with
       | None   -> None
       | Some _ -> Some (Functions.chi_square_greater (truncate dgf) srs)
-    in
+      in*)
     { m_pred
     ; m_resp
     ; size = float n
@@ -133,7 +133,7 @@ module Univariate = struct
     ; residuals = rss
     ; sum_residuals = srs
     ; inferred_var = srs /. dgf
-    ; goodness_of_fit = q
+    (*; goodness_of_fit = q *)
     ; s_yy = sum2 ( *. ) d_y d_y
     ; s_xx = d_xx_w
     }
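Under the new interface, per-observation weights are built with the opt constructor instead of being passed as the bare spec array. A sketch with made-up data:

    open Regression

    let x = [| 1.0; 2.0; 3.0; 4.0 |]
    let y = [| 2.1; 3.9; 6.2; 7.8 |]

    (* Ordinary least squares, unchanged. *)
    let lrm = Univariate.regress x ~resp:y

    (* Weighted fit: down-weight the last observation. *)
    let wlrm =
      Univariate.regress ~opt:(Univariate.opt ~weights:[| 1.0; 1.0; 1.0; 0.5 |] ()) x ~resp:y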
See "Estimation of the constant term when using ridge regression" @@ -383,14 +374,18 @@ end module Multivariate = struct + type opt = + { add_constant_column : bool (** Instructs the method to efficiently insert a colum of 1's into the + design matrix for the constant term. *) + ; l2_regularizer : [`S of float | `From of float array] option (** How to optionally determine the ridge parameter. *) + } + include EvalMultiVarite - type spec = multivariate_spec + let opt ?l2_regularizer ?(add_constant_column=true) () = + { add_constant_column ; l2_regularizer } - let default = - { add_constant_column = false - ; lambda_spec = None - } + let default = opt () open Lacaml_util open SolveLPViaSvd @@ -420,9 +415,9 @@ module Multivariate = struct - work through these covariance matrix calculations, they're probably not right - once that's done we can expose the hypothesis testing *) - let regress ?(spec=default) pred ~resp = + let regress ?(opt=default) pred ~resp = let resp = Vec.of_array resp in - let pad = spec.add_constant_column in + let pad = opt.add_constant_column in let pred, num_obs, num_pred, across_pred_col = pad_design_matrix pred pad in (* TODO: replace with folds, across the matrix in Lacaml. *) let num_obs_float = float num_obs in @@ -431,7 +426,7 @@ module Multivariate = struct let m_pred = across_pred_col (col_mean num_obs_float) in (* since num_pred includes a value for the constant coefficient, no -1 is needed. *) let deg_of_freedom = float (num_obs - num_pred) in - let lambda = spec.lambda_spec in + let lambda = opt.l2_regularizer in let solved_lp = solve_lp pred resp lambda in let sum_residuals = dot solved_lp.resi solved_lp.resi in let inferred_var = sum_residuals /. deg_of_freedom in @@ -454,28 +449,26 @@ module Multivariate = struct end -type tikhonov_spec = - { regularizer : float array array - ; lambda_spec : lambda_spec option (* multipliers on the regularizing matrix. *) - } - module Tikhonov = struct + type opt = + { tik_matrix : float array array (** The regularizing matrix. *) + ; l2_regularizer : [`S of float | `From of float array] option (** How to optionally determine the ridge parameter. *) + } + include EvalMultiVarite - type spec = tikhonov_spec + let opt ?(tik_matrix = [|[||]|]) ?l2_regularizer () = + { tik_matrix ; l2_regularizer } - let default = - { regularizer = [|[||]|] - ; lambda_spec = None - } + let default = opt () open Lacaml_util open SolveLPViaSvd - let gtr_to_lambda fit_model lambda_spec = + let gtr_to_lambda fit_model l2_regularizer = let g slp = Vec.ssqr slp.looe in - to_lambda fit_model g lambda_spec + to_lambda fit_model g l2_regularizer (* TODO: This method can be optimized if we use a different decomposition. *) let gtk_solve_lp pred resp tik = function @@ -487,7 +480,7 @@ module Tikhonov = struct let resi = Vec.sub resp (gemv pred coef) in let looe = full_looe covm pred resi in { coef ; vaco = `Cov covm ; resi ; looe} - | Some lambda_spec -> + | Some l2_regularizer -> let covm = gemm ~transa:`T pred pred in let eval l = let copy = lacpy covm in @@ -498,17 +491,16 @@ module Tikhonov = struct let looe = full_looe covm pred resi in { coef ; vaco = `Cov covm ; resi ; looe} in - let lambda, slp = gtr_to_lambda eval lambda_spec in + let lambda, slp = gtr_to_lambda eval l2_regularizer in let _ = P.printf "chose gtr lambda of %0.4f\n" lambda in slp - let regress ?(spec=default) pred ~resp = + let regress ?(opt=default) pred ~resp = let pred = Mat.of_array pred in let resp = Vec.of_array resp in - (* UGH, awkard! 
@@ -454,28 +449,26 @@ end

-type tikhonov_spec =
-  { regularizer : float array array
-  ; lambda_spec : lambda_spec option (* multipliers on the regularizing matrix. *)
-  }
-
 module Tikhonov = struct

+  type opt =
+    { tik_matrix : float array array (** The regularizing matrix. *)
+    ; l2_regularizer : [`S of float | `From of float array] option (** How to optionally determine the ridge parameter. *)
+    }
+
   include EvalMultiVarite

-  type spec = tikhonov_spec
+  let opt ?(tik_matrix = [|[||]|]) ?l2_regularizer () =
+    { tik_matrix ; l2_regularizer }

-  let default =
-    { regularizer = [|[||]|]
-    ; lambda_spec = None
-    }
+  let default = opt ()

   open Lacaml_util
   open SolveLPViaSvd

-  let gtr_to_lambda fit_model lambda_spec =
+  let gtr_to_lambda fit_model l2_regularizer =
     let g slp = Vec.ssqr slp.looe in
-    to_lambda fit_model g lambda_spec
+    to_lambda fit_model g l2_regularizer

   (* TODO: This method can be optimized if we use a different decomposition. *)
   let gtk_solve_lp pred resp tik = function
@@ -487,7 +480,7 @@ module Tikhonov = struct
       let resi = Vec.sub resp (gemv pred coef) in
       let looe = full_looe covm pred resi in
       { coef ; vaco = `Cov covm ; resi ; looe}
-  | Some lambda_spec ->
+  | Some l2_regularizer ->
       let covm = gemm ~transa:`T pred pred in
      let eval l =
        let copy = lacpy covm in
@@ -498,17 +491,16 @@ module Tikhonov = struct
        let looe = full_looe covm pred resi in
        { coef ; vaco = `Cov covm ; resi ; looe}
      in
-      let lambda, slp = gtr_to_lambda eval lambda_spec in
+      let lambda, slp = gtr_to_lambda eval l2_regularizer in
      let _ = P.printf "chose gtr lambda of %0.4f\n" lambda in
      slp

-  let regress ?(spec=default) pred ~resp =
+  let regress ?(opt=default) pred ~resp =
     let pred = Mat.of_array pred in
     let resp = Vec.of_array resp in
-    (* UGH, awkard!
-       to silence 41, need to 'modularize' these *)
-    let lambda = (spec:tikhonov_spec).lambda_spec in
+    let lambda = opt.l2_regularizer in
     let tik =
-      match spec.regularizer with
+      match opt.tik_matrix with
       | [|[||]|] -> Mat.make0 (Mat.dim1 pred) (Mat.dim2 pred)
       | tm       -> Mat.of_array tm
     in
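Tikhonov follows the same pattern, with the regularizing matrix carried in the opt record. A sketch; pred, resp, and the predictor count p are placeholders:

    open Regression

    (* An identity tik_matrix of size p reduces to ordinary ridge. *)
    let tik_matrix =
      Array.init p (fun i -> Array.init p (fun j -> if i = j then 1.0 else 0.0))
    let opt = Tikhonov.opt ~tik_matrix ~l2_regularizer:(`S 1.0) ()
    let tlm = Tikhonov.regress ~opt pred ~resp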
diff --git a/src/lib/regression.mli b/src/lib/regression.mli
index 5c42489..b2df88c 100644
--- a/src/lib/regression.mli
+++ b/src/lib/regression.mli
@@ -32,8 +32,8 @@ module type Linear_model_intf = sig

   (** [regress options pred resp] computes a linear model of [resp] based
       off of the independent variables in the design matrix [pred], taking
-      into account the various method [spec]s. *)
-  val regress : ?spec:spec -> input array -> resp:float array -> t
+      into account the various method [opt]s. *)
+  val regress : ?opt:opt -> input array -> resp:float array -> t

   (** [residuals t] returns the residuals, the difference between the observed
       value and the estimated value for the independent, response, values. *)
@@ -71,17 +71,21 @@

 end

-(** Simple one dimensional regress. *)
+(** Simple one dimensional regression. *)
 module Univariate : sig

+  (** The optional [opt] for univariate regression is an array of weights,
+      one for each observation. One can use them to change the model such
+      that each error (e_i) is now sampled from its own distribution:
+      [N(0, s/w_i)], where s^2 is the error variance and w_i is the weight
+      of the ith error. *)
+  type opt = float array
+
+  val opt : ?weights:float array -> unit -> opt
+
   include Linear_model_intf with type input = float
-  (** The optional [spec] for univariate regression are weights for each
-      observation. One can use them to change the model such that each
-      error (e_i) is now sampled from it's own distribution: [N(0, s/w_i)],
-      where s^2 is the error variance and w_i is the weight of the ith
-      error. *)
-                             and type spec = float array
+                             and type opt := opt

   (** [alpha t] a shorthand for the constant parameter used in the regression.
       Equivalent to [(coefficients t).(0)] *)
@@ -101,22 +105,23 @@ module Univariate : sig

 end

-type lambda_spec =
-  | Spec of float       (** Use this specific value. *)
-  | From of float array (** Choose the value in the array with the lowest Leave-One-Out-Error. *)
+(** Multi-dimensional input regression, with support for Ridge regression. *)
+module Multivariate : sig

-type multivariate_spec =
-  { add_constant_column : bool (** Instructs the method to efficiently insert a colum of 1's into the
-                                   design matrix for the constant term. *)
-  ; lambda_spec : lambda_spec option (** How to optionally determine the ridge parameter. *)
-  }
+  type opt =
+    { add_constant_column : bool (** Instructs the method to efficiently insert a column of 1's into the
+                                     design matrix for the constant term. *)
+    ; l2_regularizer : [`S of float | `From of float array] option (** How to optionally determine the ridge parameter. *)
+    }

-(** Multi-dimensional input regression, with support for Ridge regression. *)
-module Multivariate : sig
+  val opt : ?l2_regularizer:[`S of float | `From of float array] ->
+            ?add_constant_column:bool ->
+            unit ->
+            opt

   include Linear_model_intf with type input = float array
-                             and type spec = multivariate_spec
+                             and type opt := opt

   (** [aic linear_model] return the Akaike information criterion for the
       [linear_model].*)
@@ -128,11 +133,6 @@ module Multivariate : sig

 end

-type tikhonov_spec =
-  { regularizer : float array array (** The regularizing matrix. *)
-  ; lambda_spec : lambda_spec option (** How to optionally determine the ridge parameter. *)
-  }
-
 (** Multi-dimensional input regression with a matrix regularizer.
     described {{:https://en.wikipedia.org/wiki/Tikhonov_regularization} here}.

@@ -140,9 +140,19 @@
     been verified. A warning is printed to standard-error. *)
 module Tikhonov : sig

+  type opt =
+    { tik_matrix : float array array (** The regularizing matrix. *)
+    ; l2_regularizer : [`S of float | `From of float array] option (** How to optionally determine the ridge parameter. *)
+    }
+
+  val opt : ?tik_matrix:float array array ->
+            ?l2_regularizer:[`S of float | `From of float array] ->
+            unit ->
+            opt
+
   include Linear_model_intf with type input = float array
-                             and type spec = tikhonov_spec
+                             and type opt := opt

   (** [aic linear_model] return the Akaike information criterion for the
       [linear_model].*)
diff --git a/src/lib/regression.mlt b/src/lib/regression.mlt
index bcc03ac..4f9d3c0 100644
--- a/src/lib/regression.mlt
+++ b/src/lib/regression.mlt
@@ -31,12 +31,8 @@ let looe_manually lambda pred resp =
   pred
   |> Array.mapi (fun i p ->
       let p_pred, p_resp = without i in
-      let ms =
-        { add_constant_column = false
-        ; lambda_spec = Some (Spec lambda)
-        }
-      in
-      let model = M.regress ~spec:ms p_pred ~resp:p_resp in
+      let opt = M.opt ~add_constant_column:false ~l2_regularizer:(`S lambda) () in
+      let model = M.regress ~opt p_pred ~resp:p_resp in
       resp.(i) -. M.eval model p)

 (* Testing the accuracy of numerical algorithms is hard (and fun).
@@ -131,7 +127,7 @@ let () =
       ~title:"General can recover coefficients."
       Gen.(general_model 1e11 ~max_samples ~max_predictors)
      (fun (pred, coef, resp) ->
-        let glm = M.regress ~resp pred in
+        let glm = M.(regress ~opt:(opt ~add_constant_column:false ()) ~resp pred) in
        Vectors.equal ~d:1e-2 (M.coefficients glm) coef)
      Spec.([just_postcond_pred is_true]);

@@ -142,7 +138,7 @@ let () =
        let glm = M.regress ~resp pred in
        let r0 = (M.residuals glm).(0) in
        let e0 = resp.(0) -. M.eval glm pred.(0) in
-        (*Printf.printf "%.20f\t%.20f\t%b\t%b\n" r0 e0 (r0 = e0) (equal_floats ~d:dx r0 e0)*)
+        (*Printf.printf "%.20f\t%.20f\t%b\t%b\n" r0 e0 (r0 = e0) (equal_floats ~d:dx r0 e0)*)
        equal_floats ~d:1e-6 r0 e0)
      Spec.([just_postcond_pred is_true]);

@@ -152,9 +148,9 @@ let () =
      (fun (pred, _, resp) ->
        let glm = M.regress ~resp pred in
        let m = Descriptive.mean resp in
-        let lambda = Spec (m *. m) in (* has to be big enough *)
-        let ms = { add_constant_column = false; lambda_spec = Some lambda } in
-        let rdg = M.regress ~spec:ms ~resp pred in
+        let lmd = m *. m in (* has to be big enough *)
+        let opt = M.opt ~add_constant_column:false ~l2_regularizer:(`S lmd) () in
+        let rdg = M.regress ~opt ~resp pred in
        let rd = Vectors.dot (M.coefficients rdg) (M.coefficients rdg) in
        let gd = Vectors.dot (M.coefficients glm) (M.coefficients glm) in
        rd < gd)
@@ -166,12 +162,12 @@ let () =
      Gen.(general_model 1e11 ~max_samples ~max_predictors)
      (fun (pred, _, resp) ->
        let r,c = Matrices.dim pred in
-        let reg =
+        let tik_matrix =
          Array.init r (fun i ->
            Array.init c (fun j ->
              if i = j then 1.0 else 0.0))
        in
-        let opt = {regularizer = reg; lambda_spec = None} in
-        let _t = T.regress ~spec:opt ~resp pred in
+        let opt = T.opt ~tik_matrix () in
+        let _t = T.regress ~opt ~resp pred in
        true)
      Spec.([just_postcond_pred is_true]);
diff --git a/src/lib/util.ml b/src/lib/util.ml
index 613f075..aa56673 100644
--- a/src/lib/util.ml
+++ b/src/lib/util.ml
@@ -192,8 +192,8 @@ end

 module type Optional_arg_intf = sig
-  type spec (** type of default argument. *)
-  val default : spec (** A default value used when not specified.*)
+  type opt (** Type of the optional argument. *)
+  val default : opt (** A default value used when none is specified. *)
 end

 let fst3 (x,_,_) = x
diff --git a/src/lib/util.mli b/src/lib/util.mli
index fbb747a..7803959 100644
--- a/src/lib/util.mli
+++ b/src/lib/util.mli
@@ -172,8 +172,8 @@ end

 (** When passing optional arguments to procedures. *)
 module type Optional_arg_intf = sig
-  type spec (** type of default argument. *)
-  val default : spec (** A default value used when not specified.*)
+  type opt (** Type of the optional argument. *)
+  val default : opt (** A default value used when none is specified. *)
 end
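Implementations of the renamed signature provide an opt type and a default for it; a hypothetical conforming module:

    module Bandwidth : Util.Optional_arg_intf with type opt = float = struct
      type opt = float   (* e.g. a kernel bandwidth *)
      let default = 1.0
    end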