GCG Parameters
This page lists all parameters of the current GCG version.
Since GCG is built on top of SCIP, the SCIP parameters are also included.
# SCIP version 7.0.3.5 # branching score function ('s'um, 'p'roduct, 'q'uotient) # [type: char, advanced: TRUE, range: {spq}, default: p] branching/scorefunc = p # branching score factor to weigh downward and upward gain prediction in sum score function # [type: real, advanced: TRUE, range: [0,1], default: 0.167] branching/scorefac = 0.167 # should branching on binary variables be preferred? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] branching/preferbinary = FALSE # minimal relative distance of branching point to bounds when branching on a continuous variable # [type: real, advanced: FALSE, range: [0,0.5], default: 0.2] branching/clamp = 0.2 # fraction by which to move branching point of a continuous variable towards the middle of the domain; a value of 1.0 leads to branching always in the middle of the domain # [type: real, advanced: FALSE, range: [0,1], default: 0.75] branching/midpull = 0.75 # multiply midpull by relative domain width if the latter is below this value # [type: real, advanced: FALSE, range: [0,1], default: 0.5] branching/midpullreldomtrig = 0.5 # strategy for normalization of LP gain when updating pseudocosts of continuous variables (divide by movement of 'l'p value, reduction in 'd'omain width, or reduction in domain width of 's'ibling) # [type: char, advanced: FALSE, range: {dls}, default: s] branching/lpgainnormalize = s # should updating pseudo costs for continuous variables be delayed to the time after separation? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] branching/delaypscostupdate = TRUE # should pseudo costs be updated also in diving and probing mode? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] branching/divingpscost = TRUE # should all strong branching children be regarded even if one is detected to be infeasible? 
(only with propagation) # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] branching/forceallchildren = FALSE # child node to be regarded first during strong branching (only with propagation): 'u'p child, 'd'own child, 'h'istory-based, or 'a'utomatic # [type: char, advanced: TRUE, range: {aduh}, default: a] branching/firstsbchild = a # should LP solutions during strong branching with propagation be checked for feasibility? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] branching/checksol = TRUE # should LP solutions during strong branching with propagation be rounded? (only when checksbsol=TRUE) # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] branching/roundsbsol = TRUE # score adjustment near zero by adding epsilon (TRUE) or using maximum (FALSE) # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] branching/sumadjustscore = FALSE # should automatic tree compression after the presolving be enabled? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] compression/enable = FALSE # should conflict analysis be enabled? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] conflict/enable = FALSE # should conflicts based on an old cutoff bound be removed from the conflict pool after improving the primal bound? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] conflict/cleanboundexceedings = TRUE # use local rows to construct infeasibility proofs # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] conflict/uselocalrows = TRUE # should propagation conflict analysis be used? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] conflict/useprop = TRUE # should infeasible LP conflict analysis be used? ('o'ff, 'c'onflict graph, 'd'ual ray, 'b'oth conflict graph and dual ray) # [type: char, advanced: FALSE, range: {ocdb}, default: b] conflict/useinflp = b # should bound exceeding LP conflict analysis be used? 
('o'ff, 'c'onflict graph, 'd'ual ray, 'b'oth conflict graph and dual ray) # [type: char, advanced: FALSE, range: {ocdb}, default: b] conflict/useboundlp = b # should infeasible/bound exceeding strong branching conflict analysis be used? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] conflict/usesb = TRUE # should pseudo solution conflict analysis be used? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] conflict/usepseudo = TRUE # maximal fraction of variables involved in a conflict constraint # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0.15] conflict/maxvarsfac = 0.15 # minimal absolute maximum of variables involved in a conflict constraint # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] conflict/minmaxvars = 0 # maximal number of LP resolving loops during conflict analysis (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 2] conflict/maxlploops = 2 # maximal number of LP iterations in each LP resolving loop (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10] conflict/lpiterations = 10 # number of depth levels up to which first UIP's are used in conflict analysis (-1: use All-FirstUIP rule) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] conflict/fuiplevels = -1 # maximal number of intermediate conflict constraints generated in conflict graph (-1: use every intermediate constraint) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] conflict/interconss = -1 # number of depth levels up to which UIP reconvergence constraints are generated (-1: generate reconvergence constraints in all depth levels) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] conflict/reconvlevels = -1 # maximal number of conflict constraints accepted at an infeasible node (-1: use all generated conflict constraints) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10] conflict/maxconss = 
10 # maximal size of conflict store (-1: auto, 0: disable storage) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10000] conflict/maxstoresize = 10000 # should binary conflicts be preferred? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] conflict/preferbinary = FALSE # prefer infeasibility proof to boundexceeding proof # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] conflict/prefinfproof = TRUE # should conflict constraints be generated that are only valid locally? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] conflict/allowlocal = TRUE # should conflict constraints be attached only to the local subtree where they can be useful? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] conflict/settlelocal = FALSE # should earlier nodes be repropagated in order to replace branching decisions by deductions? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] conflict/repropagate = TRUE # should constraints be kept for repropagation even if they are too long? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] conflict/keepreprop = TRUE # should the conflict constraints be separated? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] conflict/separate = TRUE # should the conflict constraints be subject to aging? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] conflict/dynamic = TRUE # should the conflict's relaxations be subject to LP aging and cleanup? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] conflict/removable = TRUE # score factor for depth level in bound relaxation heuristic # [type: real, advanced: TRUE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 1] conflict/graph/depthscorefac = 1 # score factor for impact on acticity in bound relaxation heuristic # [type: real, advanced: TRUE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 1] conflict/proofscorefac = 1 # score factor for up locks in bound relaxation heuristic # [type: real, advanced: TRUE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 0] conflict/uplockscorefac = 0 # score factor for down locks in bound relaxation heuristic # [type: real, advanced: TRUE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 0] conflict/downlockscorefac = 0 # factor to decrease importance of variables' earlier conflict scores # [type: real, advanced: TRUE, range: [1e-06,1], default: 0.98] conflict/scorefac = 0.98 # number of successful conflict analysis calls that trigger a restart (0: disable conflict restarts) # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] conflict/restartnum = 0 # factor to increase restartnum with after each restart # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 1.5] conflict/restartfac = 1.5 # should relaxed bounds be ignored? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] conflict/ignorerelaxedbd = FALSE # maximal number of variables to try to detect global bound implications and shorten the whole conflict set (0: disabled) # [type: int, advanced: TRUE, range: [0,2147483647], default: 250] conflict/maxvarsdetectimpliedbounds = 250 # try to shorten the whole conflict set or terminate early (depending on the 'maxvarsdetectimpliedbounds' parameter) # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] conflict/fullshortenconflict = TRUE # the weight the VSIDS score is weight by updating the VSIDS for a variable if it is part of a conflict # [type: real, advanced: FALSE, range: [0,1], default: 0] conflict/conflictweight = 0 # the weight the VSIDS score is weight by updating the VSIDS for a variable if it is part of a conflict graph # [type: real, advanced: FALSE, range: [0,1], default: 1] conflict/conflictgraphweight = 1 # minimal improvement of primal bound to remove conflicts based on a previous incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.05] conflict/minimprove = 0.05 # weight of the size of a conflict used in score calculation # [type: real, advanced: TRUE, range: [0,1], default: 0.001] conflict/weightsize = 0.001 # weight of the repropagation depth of a conflict used in score calculation # [type: real, advanced: TRUE, range: [0,1], default: 0.1] conflict/weightrepropdepth = 0.1 # weight of the valid depth of a conflict used in score calculation # [type: real, advanced: TRUE, range: [0,1], default: 1] conflict/weightvaliddepth = 1 # apply cut generating functions to construct alternative proofs # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] conflict/sepaaltproofs = FALSE # maximum age an unnecessary constraint can reach before it is deleted (0: dynamic, -1: keep all constraints) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 0] constraints/agelimit = 0 # age of a constraint after which it is 
marked obsolete (0: dynamic, -1 do not mark constraints obsolete) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] constraints/obsoleteage = -1 # should enforcement of pseudo solution be disabled? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/disableenfops = FALSE # verbosity level of output # [type: int, advanced: FALSE, range: [0,5], default: 4] display/verblevel = 4 # maximal number of characters in a node information line # [type: int, advanced: FALSE, range: [0,2147483647], default: 143] display/width = 143 # frequency for displaying node information lines # [type: int, advanced: FALSE, range: [-1,2147483647], default: 100] display/freq = 100 # frequency for displaying header lines (every n'th node information line) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 15] display/headerfreq = 15 # should the LP solver display status messages? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] display/lpinfo = FALSE # display all violations for a given start solution / the best solution after the solving process? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] display/allviols = FALSE # should the relevant statistics be displayed at the end of solving? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] display/relevantstats = TRUE # should setting of common subscip parameters include the activation of the UCT node selector? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/useuctsubscip = FALSE # should statistics be collected for variable domain value pairs? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] history/valuebased = FALSE # should variable histories be merged from sub-SCIPs whenever possible? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] history/allowmerge = FALSE # should variable histories be transferred to initialize SCIP copies? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] history/allowtransfer = FALSE # maximal time in seconds to run # [type: real, advanced: FALSE, range: [0,1e+20], default: 1e+20] limits/time = 1e+20 # maximal number of nodes to process (-1: no limit) # [type: longint, advanced: FALSE, range: [-1,9223372036854775807], default: -1] limits/nodes = -1 # maximal number of total nodes (incl. restarts) to process (-1: no limit) # [type: longint, advanced: FALSE, range: [-1,9223372036854775807], default: -1] limits/totalnodes = -1 # solving stops, if the given number of nodes was processed since the last improvement of the primal solution value (-1: no limit) # [type: longint, advanced: FALSE, range: [-1,9223372036854775807], default: -1] limits/stallnodes = -1 # maximal memory usage in MB; reported memory usage is lower than real memory usage! # [type: real, advanced: FALSE, range: [0,8796093022207], default: 8796093022207] limits/memory = 8796093022207 # solving stops, if the relative gap = |primal - dual|/MIN(|dual|,|primal|) is below the given value, the gap is 'Infinity', if primal and dual bound have opposite signs # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0] limits/gap = 0 # solving stops, if the absolute gap = |primalbound - dualbound| is below the given value # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0] limits/absgap = 0 # solving stops, if the given number of solutions were found (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] limits/solutions = -1 # solving stops, if the given number of solution improvements were found (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] limits/bestsol = -1 # maximal number of solutions to store in the solution storage # [type: int, advanced: FALSE, range: [1,2147483647], default: 100] limits/maxsol = 100 # maximal number of solutions candidates to store in the solution storage of 
the original problem # [type: int, advanced: FALSE, range: [0,2147483647], default: 10] limits/maxorigsol = 10 # solving stops, if the given number of restarts was triggered (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] limits/restarts = -1 # if solve exceeds this number of nodes for the first time, an automatic restart is triggered (-1: no automatic restart) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] limits/autorestartnodes = -1 # frequency for solving LP at the nodes (-1: never; 0: only root LP) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] lp/solvefreq = 1 # iteration limit for each single LP solve (-1: no limit) # [type: longint, advanced: TRUE, range: [-1,9223372036854775807], default: -1] lp/iterlim = -1 # iteration limit for initial root LP solve (-1: no limit) # [type: longint, advanced: TRUE, range: [-1,9223372036854775807], default: -1] lp/rootiterlim = -1 # maximal depth for solving LP at the nodes (-1: no depth limit) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] lp/solvedepth = -1 # LP algorithm for solving initial LP relaxations (automatic 's'implex, 'p'rimal simplex, 'd'ual simplex, 'b'arrier, barrier with 'c'rossover) # [type: char, advanced: FALSE, range: {spdbc}, default: s] lp/initalgorithm = s # LP algorithm for resolving LP relaxations if a starting basis exists (automatic 's'implex, 'p'rimal simplex, 'd'ual simplex, 'b'arrier, barrier with 'c'rossover) # [type: char, advanced: FALSE, range: {spdbc}, default: s] lp/resolvealgorithm = s # LP pricing strategy ('l'pi default, 'a'uto, 'f'ull pricing, 'p'artial, 's'teepest edge pricing, 'q'uickstart steepest edge pricing, 'd'evex pricing) # [type: char, advanced: FALSE, range: {lafpsqd}, default: l] lp/pricing = l # should lp state be cleared at the end of probing mode when lp was initially unsolved, e.g., when called right after presolving? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] lp/clearinitialprobinglp = TRUE # should the LP be resolved to restore the state at start of diving (if FALSE we buffer the solution values)? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] lp/resolverestore = FALSE # should the buffers for storing LP solution values during diving be freed at end of diving? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] lp/freesolvalbuffers = FALSE # maximum age a dynamic column can reach before it is deleted from the LP (-1: don't delete columns due to aging) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10] lp/colagelimit = 10 # maximum age a dynamic row can reach before it is deleted from the LP (-1: don't delete rows due to aging) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10] lp/rowagelimit = 10 # should new non-basic columns be removed after LP solving? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] lp/cleanupcols = FALSE # should new non-basic columns be removed after root LP solving? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] lp/cleanupcolsroot = FALSE # should new basic rows be removed after LP solving? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] lp/cleanuprows = TRUE # should new basic rows be removed after root LP solving? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] lp/cleanuprowsroot = TRUE # should LP solver's return status be checked for stability? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] lp/checkstability = TRUE # maximum condition number of LP basis counted as stable (-1.0: no limit) # [type: real, advanced: TRUE, range: [-1,1.79769313486232e+308], default: -1] lp/conditionlimit = -1 # minimal Markowitz threshold to control sparsity/stability in LU factorization # [type: real, advanced: TRUE, range: [0.0001,0.9999], default: 0.01] lp/minmarkowitz = 0.01 # should LP solutions be checked for primal feasibility, resolving LP when numerical troubles occur? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] lp/checkprimfeas = TRUE # should LP solutions be checked for dual feasibility, resolving LP when numerical troubles occur? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] lp/checkdualfeas = TRUE # should infeasibility proofs from the LP be checked? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] lp/checkfarkas = TRUE # which FASTMIP setting of LP solver should be used? 0: off, 1: low # [type: int, advanced: TRUE, range: [0,1], default: 1] lp/fastmip = 1 # LP scaling (0: none, 1: normal, 2: aggressive) # [type: int, advanced: TRUE, range: [0,2], default: 1] lp/scaling = 1 # should presolving of LP solver be used? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] lp/presolving = TRUE # should the lexicographic dual algorithm be used? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] lp/lexdualalgo = FALSE # should the lexicographic dual algorithm be applied only at the root node # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] lp/lexdualrootonly = TRUE # maximum number of rounds in the lexicographic dual algorithm (-1: unbounded) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 2] lp/lexdualmaxrounds = 2 # choose fractional basic variables in lexicographic dual algorithm? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] lp/lexdualbasic = FALSE # turn on the lex dual algorithm only when stalling? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] lp/lexdualstalling = TRUE # disable the cutoff bound in the LP solver? (0: enabled, 1: disabled, 2: auto) # [type: int, advanced: TRUE, range: [0,2], default: 2] lp/disablecutoff = 2 # simplex algorithm shall use row representation of the basis if number of rows divided by number of columns exceeds this value (-1.0 to disable row representation) # [type: real, advanced: TRUE, range: [-1,1.79769313486232e+308], default: 1.2] lp/rowrepswitch = 1.2 # number of threads used for solving the LP (0: automatic) # [type: int, advanced: TRUE, range: [0,64], default: 0] lp/threads = 0 # factor of average LP iterations that is used as LP iteration limit for LP resolve (-1: unlimited) # [type: real, advanced: TRUE, range: [-1,1.79769313486232e+308], default: -1] lp/resolveiterfac = -1 # minimum number of iterations that are allowed for LP resolve # [type: int, advanced: TRUE, range: [1,2147483647], default: 1000] lp/resolveitermin = 1000 # LP solution polishing method (0: disabled, 1: only root, 2: always, 3: auto) # [type: int, advanced: TRUE, range: [0,3], default: 3] lp/solutionpolishing = 3 # LP refactorization interval (0: auto) # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] lp/refactorinterval = 0 # should the Farkas duals always be collected when an LP is found to be infeasible? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] lp/alwaysgetduals = FALSE # solver to use for solving NLPs; leave empty to select NLPI with highest priority # [type: string, advanced: FALSE, default: ""] nlp/solver = "" # should the NLP relaxation be always disabled (also for NLPs/MINLPs)? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] nlp/disable = FALSE # fraction of maximal memory usage resulting in switch to memory saving mode # [type: real, advanced: FALSE, range: [0,1], default: 0.8] memory/savefac = 0.8 # memory growing factor for dynamically allocated arrays # [type: real, advanced: TRUE, range: [1,10], default: 1.2] memory/arraygrowfac = 1.2 # initial size of dynamically allocated arrays # [type: int, advanced: TRUE, range: [0,2147483647], default: 4] memory/arraygrowinit = 4 # memory growing factor for tree array # [type: real, advanced: TRUE, range: [1,10], default: 2] memory/treegrowfac = 2 # initial size of tree array # [type: int, advanced: TRUE, range: [0,2147483647], default: 65536] memory/treegrowinit = 65536 # memory growing factor for path array # [type: real, advanced: TRUE, range: [1,10], default: 2] memory/pathgrowfac = 2 # initial size of path array # [type: int, advanced: TRUE, range: [0,2147483647], default: 256] memory/pathgrowinit = 256 # should the CTRL-C interrupt be caught by SCIP? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] misc/catchctrlc = TRUE # should a hashtable be used to map from variable names to variables? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] misc/usevartable = TRUE # should a hashtable be used to map from constraint names to constraints? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] misc/useconstable = TRUE # should smaller hashtables be used? 
yields better performance for small problems with about 100 variables # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] misc/usesmalltables = FALSE # should the statistics be reset if the transformed problem is freed (in case of a Benders' decomposition this parameter should be set to FALSE) # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] misc/resetstat = TRUE # should only solutions be checked which improve the primal bound # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] misc/improvingsols = FALSE # should the reason be printed if a given start solution is infeasible # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] misc/printreason = TRUE # should the usage of external memory be estimated? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] misc/estimexternmem = TRUE # should SCIP try to transfer original solutions to the transformed space (after presolving)? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] misc/transorigsols = TRUE # should SCIP try to transfer transformed solutions to the original space (after solving)? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] misc/transsolsorig = TRUE # should SCIP calculate the primal dual integral value? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] misc/calcintegral = TRUE # should SCIP try to remove infinite fixings from solutions copied to the solution store? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] misc/finitesolutionstore = FALSE # should the best solution be transformed to the orignal space and be output in command line run? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] misc/outputorigsol = TRUE # should strong dual reductions be allowed in propagation and presolving? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] misc/allowstrongdualreds = TRUE # should weak dual reductions be allowed in propagation and presolving? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] misc/allowweakdualreds = TRUE # should the objective function be scaled so that it is always integer? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] misc/scaleobj = TRUE # objective value for reference purposes # [type: real, advanced: FALSE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 1e+99] misc/referencevalue = 1e+99 # bitset describing used symmetry handling technique (0: off; 1: polyhedral (orbitopes and/or symresacks); 2: orbital fixing; 3: orbitopes and orbital fixing; 4: Schreier Sims cuts; 5: Schreier Sims cuts and orbitopes); 6: Schreier Sims cuts and orbital fixing; 7: Schreier Sims cuts, orbitopes, and orbital fixing, see type_symmetry.h. # [type: int, advanced: FALSE, range: [0,7], default: 7] misc/usesymmetry = 7 # global shift of all random seeds in the plugins and the LP random seed # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] randomization/randomseedshift = 0 # seed value for permuting the problem after reading/transformation (0: no permutation) # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] randomization/permutationseed = 0 # should order of constraints be permuted (depends on permutationseed)? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] randomization/permuteconss = TRUE # should order of variables be permuted (depends on permutationseed)? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] randomization/permutevars = FALSE # random seed for LP solver, e.g. 
for perturbations in the simplex (0: LP default) # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] randomization/lpseed = 0 # child selection rule ('d'own, 'u'p, 'p'seudo costs, 'i'nference, 'l'p value, 'r'oot LP value difference, 'h'ybrid inference/root LP value difference) # [type: char, advanced: FALSE, range: {dupilrh}, default: h] nodeselection/childsel = h # values larger than this are considered infinity # [type: real, advanced: FALSE, range: [10000000000,1e+98], default: 1e+20] numerics/infinity = 1e+20 # absolute values smaller than this are considered zero # [type: real, advanced: FALSE, range: [1e-20,0.001], default: 1e-09] numerics/epsilon = 1e-09 # absolute values of sums smaller than this are considered zero # [type: real, advanced: FALSE, range: [1e-17,0.001], default: 1e-06] numerics/sumepsilon = 1e-06 # feasibility tolerance for constraints # [type: real, advanced: FALSE, range: [1e-17,0.001], default: 1e-06] numerics/feastol = 1e-06 # feasibility tolerance factor; for checking the feasibility of the best solution # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 1] numerics/checkfeastolfac = 1 # factor w.r.t. 
primal feasibility tolerance that determines default (and maximal) primal feasibility tolerance of LP solver # [type: real, advanced: FALSE, range: [1e-06,1], default: 1] numerics/lpfeastolfactor = 1 # feasibility tolerance for reduced costs in LP solution # [type: real, advanced: FALSE, range: [1e-17,0.001], default: 1e-07] numerics/dualfeastol = 1e-07 # LP convergence tolerance used in barrier algorithm # [type: real, advanced: TRUE, range: [1e-17,0.001], default: 1e-10] numerics/barrierconvtol = 1e-10 # minimal relative improve for strengthening bounds # [type: real, advanced: TRUE, range: [1e-17,1e+98], default: 0.05] numerics/boundstreps = 0.05 # minimal variable distance value to use for branching pseudo cost updates # [type: real, advanced: TRUE, range: [1e-17,1], default: 0.1] numerics/pseudocosteps = 0.1 # minimal objective distance value to use for branching pseudo cost updates # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0.0001] numerics/pseudocostdelta = 0.0001 # minimal decrease factor that causes the recomputation of a value (e.g., pseudo objective) instead of an update # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 10000000] numerics/recomputefac = 10000000 # values larger than this are considered huge and should be handled separately (e.g., in activity computation) # [type: real, advanced: TRUE, range: [0,1e+98], default: 1e+15] numerics/hugeval = 1e+15 # maximal number of presolving rounds (-1: unlimited, 0: off) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] presolving/maxrounds = -1 # abort presolve, if at most this fraction of the problem was changed in last presolve round # [type: real, advanced: TRUE, range: [0,1], default: 0.0008] presolving/abortfac = 0.0008 # maximal number of restarts (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] presolving/maxrestarts = -1 # fraction of integer variables that were fixed in the root node 
triggering a restart with preprocessing after root node evaluation # [type: real, advanced: TRUE, range: [0,1], default: 0.025] presolving/restartfac = 0.025 # limit on number of entries in clique table relative to number of problem nonzeros # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 2] presolving/clqtablefac = 2 # fraction of integer variables that were fixed in the root node triggering an immediate restart with preprocessing # [type: real, advanced: TRUE, range: [0,1], default: 0.1] presolving/immrestartfac = 0.1 # fraction of integer variables that were globally fixed during the solving process triggering a restart with preprocessing # [type: real, advanced: TRUE, range: [0,1], default: 1] presolving/subrestartfac = 1 # minimal fraction of integer variables removed after restart to allow for an additional restart # [type: real, advanced: TRUE, range: [0,1], default: 0.1] presolving/restartminred = 0.1 # should multi-aggregation of variables be forbidden? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] presolving/donotmultaggr = FALSE # should aggregation of variables be forbidden? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] presolving/donotaggr = FALSE # maximal number of variables priced in per pricing round # [type: int, advanced: FALSE, range: [1,2147483647], default: 100] pricing/maxvars = 100 # maximal number of priced variables at the root node # [type: int, advanced: FALSE, range: [1,2147483647], default: 2000] pricing/maxvarsroot = 2000 # pricing is aborted, if fac * pricing/maxvars pricing candidates were found # [type: real, advanced: FALSE, range: [1,1.79769313486232e+308], default: 2] pricing/abortfac = 2 # should variables created at the current node be deleted when the node is solved in case they are not present in the LP anymore? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] pricing/delvars = FALSE # should variables created at the root node be deleted when the root is solved in case they are not present in the LP anymore? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] pricing/delvarsroot = FALSE # should the variables be labelled for the application of Benders' decomposition? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] decomposition/benderslabels = FALSE # if a decomposition exists, should Benders' decomposition be applied? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] decomposition/applybenders = FALSE # maximum number of edges in block graph computation (-1: no limit, 0: disable block graph computation) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 10000] decomposition/maxgraphedge = 10000 # the tolerance used for checking optimality in Benders' decomposition. tol where optimality is given by LB + tol > UB. # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 1e-06] benders/solutiontol = 1e-06 # should Benders' cuts be generated from the solution to the LP relaxation? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] benders/cutlpsol = TRUE # should Benders' decomposition be copied for use in sub-SCIPs? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] benders/copybenders = TRUE # maximal number of propagation rounds per node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 100] propagating/maxrounds = 100 # maximal number of propagation rounds in the root node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1000] propagating/maxroundsroot = 1000 # should propagation be aborted immediately? 
setting this to FALSE could help conflict analysis to produce more conflict constraints # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] propagating/abortoncutoff = TRUE # should reoptimization be used? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] reoptimization/enable = FALSE # maximal number of saved nodes # [type: int, advanced: TRUE, range: [-1,2147483647], default: 2147483647] reoptimization/maxsavednodes = 2147483647 # maximal number of bound changes between two stored nodes on one path # [type: int, advanced: TRUE, range: [0,2147483647], default: 2147483647] reoptimization/maxdiffofnodes = 2147483647 # save global constraints to separate infeasible subtrees. # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] reoptimization/globalcons/sepainfsubtrees = TRUE # separate the optimal solution, i.e., for constrained shortest path # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] reoptimization/sepabestsol = FALSE # use variable history of the previous solve if the objective function has changed only slightly # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] reoptimization/storevarhistory = FALSE # re-use pseudo costs if the objective function changed only slightly # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] reoptimization/usepscost = FALSE # at which reopttype should the LP be solved? (1: transit, 3: strong branched, 4: w/ added logicor, 5: only leafs). # [type: int, advanced: TRUE, range: [1,5], default: 1] reoptimization/solvelp = 1 # maximal number of bound changes at node to skip solving the LP # [type: int, advanced: TRUE, range: [0,2147483647], default: 1] reoptimization/solvelpdiff = 1 # number of best solutions which should be saved for the following runs. 
(-1: save all) # [type: int, advanced: TRUE, range: [0,2147483647], default: 2147483647] reoptimization/savesols = 2147483647 # similarity of two sequential objective functions to disable solving the root LP. # [type: real, advanced: TRUE, range: [-1,1], default: 0.8] reoptimization/objsimrootLP = 0.8 # similarity of two objective functions to re-use stored solutions # [type: real, advanced: TRUE, range: [-1,1], default: -1] reoptimization/objsimsol = -1 # minimum similarity for using reoptimization of the search tree. # [type: real, advanced: TRUE, range: [-1,1], default: -1] reoptimization/delay = -1 # time limit over all reoptimization rounds?. # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] reoptimization/commontimelimit = FALSE # replace branched inner nodes by their child nodes, if the number of bound changes is not too large # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] reoptimization/shrinkinner = TRUE # try to fix variables at the root node before reoptimizing by probing like strong branching # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] reoptimization/strongbranchinginit = TRUE # delete stored nodes which were not reoptimized # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] reoptimization/reducetofrontier = TRUE # force a restart if the last n optimal solutions were found by heuristic reoptsols # [type: int, advanced: TRUE, range: [1,2147483647], default: 3] reoptimization/forceheurrestart = 3 # save constraint propagations # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] reoptimization/saveconsprop = FALSE # use constraints to reconstruct the subtree pruned by dual reduction when reactivating the node # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] reoptimization/usesplitcons = TRUE # use 'd'efault, 'r'andom or a variable ordering based on 'i'nference score for interdiction branching used during reoptimization # [type: char, advanced: TRUE, 
range: {dir}, default: d] reoptimization/varorderinterdiction = d # reoptimize cuts found at the root node # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] reoptimization/usecuts = FALSE # maximal age of a cut to be used for reoptimization # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] reoptimization/maxcutage = 0 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separation (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: FALSE, range: [0,1], default: 1] separating/maxbounddist = 1 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying local separation (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: FALSE, range: [0,1], default: 0] separating/maxlocalbounddist = 0 # maximal ratio between coefficients in strongcg, cmir, and flowcover cuts # [type: real, advanced: FALSE, range: [1,1e+98], default: 10000] separating/maxcoefratio = 10000 # maximal ratio between coefficients (as factor of 1/feastol) to ensure in rowprep cleanup # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 10] separating/maxcoefratiofacrowprep = 10 # minimal efficacy for a cut to enter the LP # [type: real, advanced: FALSE, range: [0,1e+98], default: 0.0001] separating/minefficacy = 0.0001 # minimal efficacy for a cut to enter the LP in the root node # [type: real, advanced: FALSE, range: [0,1e+98], default: 0.0001] separating/minefficacyroot = 0.0001 # minimum cut activity quotient to convert cuts into constraints during a restart (0.0: all cuts are converted) # [type: real, advanced: FALSE, range: [0,1], default: 0.8] separating/minactivityquot = 0.8 # function used for calc. scalar prod. 
in orthogonality test ('e'uclidean, 'd'iscrete) # [type: char, advanced: TRUE, range: {ed}, default: e] separating/orthofunc = e # row norm to use for efficacy calculation ('e'uclidean, 'm'aximum, 's'um, 'd'iscrete) # [type: char, advanced: TRUE, range: {emsd}, default: e] separating/efficacynorm = e # cut selection during restart ('a'ge, activity 'q'uotient) # [type: char, advanced: TRUE, range: {aq}, default: a] separating/cutselrestart = a # cut selection for sub SCIPs ('a'ge, activity 'q'uotient) # [type: char, advanced: TRUE, range: {aq}, default: a] separating/cutselsubscip = a # should cutpool separate only cuts with high relative efficacy? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/filtercutpoolrel = TRUE # maximal number of runs for which separation is enabled (-1: unlimited) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] separating/maxruns = -1 # maximal number of separation rounds per node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] separating/maxrounds = -1 # maximal number of separation rounds in the root node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] separating/maxroundsroot = -1 # maximal number of separation rounds in the root node of a subsequent run (-1: unlimited) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] separating/maxroundsrootsubrun = -1 # maximal additional number of separation rounds in subsequent price-and-cut loops (-1: no additional restriction) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 1] separating/maxaddrounds = 1 # maximal number of consecutive separation rounds without objective or integrality improvement in local nodes (-1: no additional restriction) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1] separating/maxstallrounds = 1 # maximal number of consecutive separation rounds without objective or integrality improvement in the root 
node (-1: no additional restriction) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 10] separating/maxstallroundsroot = 10 # maximal number of cuts separated per separation round (0: disable local separation) # [type: int, advanced: FALSE, range: [0,2147483647], default: 100] separating/maxcuts = 100 # maximal number of separated cuts at the root node (0: disable root node separation) # [type: int, advanced: FALSE, range: [0,2147483647], default: 2000] separating/maxcutsroot = 2000 # maximum age a cut can reach before it is deleted from the global cut pool, or -1 to keep all cuts # [type: int, advanced: TRUE, range: [-1,2147483647], default: 80] separating/cutagelimit = 80 # separation frequency for the global cut pool (-1: disable global cut pool, 0: only separate pool at the root) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] separating/poolfreq = 10 # parallel optimisation mode, 0: opportunistic or 1: deterministic. # [type: int, advanced: FALSE, range: [0,1], default: 1] parallel/mode = 1 # the minimum number of threads used during parallel solve # [type: int, advanced: FALSE, range: [0,64], default: 1] parallel/minnthreads = 1 # the maximum number of threads used during parallel solve # [type: int, advanced: FALSE, range: [0,64], default: 8] parallel/maxnthreads = 8 # set different random seeds in each concurrent solver? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] concurrent/changeseeds = TRUE # use different child selection rules in each concurrent solver? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] concurrent/changechildsel = TRUE # should the concurrent solvers communicate global variable bound changes? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] concurrent/commvarbnds = TRUE # should the problem be presolved before it is copied to the concurrent solvers? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] concurrent/presolvebefore = TRUE # maximum number of solutions that will be shared in one synchronization # [type: int, advanced: FALSE, range: [0,2147483647], default: 5131912] concurrent/initseed = 5131912 # initial frequency of synchronization with other threads # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 10] concurrent/sync/freqinit = 10 # maximal frequency of synchronization with other threads # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 10] concurrent/sync/freqmax = 10 # factor by which the frequency of synchronization is changed # [type: real, advanced: FALSE, range: [1,1.79769313486232e+308], default: 1.5] concurrent/sync/freqfactor = 1.5 # when adapting the synchronization frequency this value is the targeted relative difference by which the absolute gap decreases per synchronization # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.001] concurrent/sync/targetprogress = 0.001 # maximum number of solutions that will be shared in a single synchronization # [type: int, advanced: FALSE, range: [0,1000], default: 3] concurrent/sync/maxnsols = 3 # maximum number of synchronizations before reading is enforced regardless of delay # [type: int, advanced: TRUE, range: [0,100], default: 7] concurrent/sync/maxnsyncdelay = 7 # minimum delay before synchronization data is read # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 10] concurrent/sync/minsyncdelay = 10 # how many of the N best solutions should be considered for synchronization? 
# [type: int, advanced: FALSE, range: [0,2147483647], default: 10] concurrent/sync/nbestsols = 10 # path prefix for parameter setting files of concurrent solvers # [type: string, advanced: FALSE, default: ""] concurrent/paramsetprefix = "" # default clock type (1: CPU user seconds, 2: wall clock time) # [type: int, advanced: FALSE, range: [1,2], default: 2] timing/clocktype = 2 # is timing enabled? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] timing/enabled = TRUE # belongs reading time to solving time? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] timing/reading = FALSE # should clock checks of solving time be performed less frequently (note: time limit could be exceeded slightly) # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] timing/rareclockcheck = FALSE # should timing for statistic output be performed? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] timing/statistictiming = TRUE # should time for evaluation in NLP solves be measured? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] timing/nlpieval = FALSE # name of the VBC tool output file, or - if no VBC tool output should be created # [type: string, advanced: FALSE, default: "-"] visual/vbcfilename = "-" # name of the BAK tool output file, or - if no BAK tool output should be created # [type: string, advanced: FALSE, default: "-"] visual/bakfilename = "-" # should the real solving time be used instead of a time step counter in visualization? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] visual/realtime = TRUE # should the node where solutions are found be visualized? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] visual/dispsols = FALSE # should lower bound information be visualized? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] visual/displb = FALSE # should be output the external value of the objective? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] visual/objextern = TRUE # should model constraints be marked as initial? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] reading/initialconss = TRUE # should model constraints be subject to aging? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] reading/dynamicconss = TRUE # should columns be added and removed dynamically to the LP? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] reading/dynamiccols = FALSE # should rows be added and removed dynamically to the LP? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] reading/dynamicrows = FALSE # should all constraints be written (including the redundant constraints)? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] write/allconss = FALSE # should variables set to zero be printed? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] write/printzeros = FALSE # when writing a generic problem the index for the first variable should start with? 
# [type: int, advanced: FALSE, range: [0,1073741823], default: 0] write/genericnamesoffset = 0 # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/and/sepafreq = 1 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/and/propfreq = 1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/and/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: 100] constraints/and/eagerfreq = 100 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] constraints/and/maxprerounds = -1 # should separation method be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/and/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/and/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 20] constraints/and/presoltiming = 20 # should pairwise constraint comparison be performed in presolving? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/and/presolpairwise = TRUE # should hash table be used for detecting redundant constraints in advance # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/and/presolusehashing = TRUE # should the AND-constraint get linearized and removed (in presolving)? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/and/linearize = FALSE # should cuts be separated during LP enforcing? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/and/enforcecuts = TRUE # should an aggregated linearization be used? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/and/aggrlinearization = FALSE # should all binary resultant variables be upgraded to implicit binary variables? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/and/upgraderesultant = TRUE # should dual presolving be performed? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/and/dualpresolving = TRUE # maximal percentage of continuous variables within a conflict # [type: real, advanced: FALSE, range: [0,1], default: 0.4] conflict/bounddisjunction/continuousfrac = 0.4 # priority of conflict handler <bounddisjunction> # [type: int, advanced: TRUE, range: [-2147483648,2147483647], default: -3000000] conflict/bounddisjunction/priority = -3000000 # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] constraints/bounddisjunction/sepafreq = -1 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/bounddisjunction/propfreq = 1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/bounddisjunction/proptiming 
= 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: 100] constraints/bounddisjunction/eagerfreq = 100 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] constraints/bounddisjunction/maxprerounds = -1 # should separation method be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/bounddisjunction/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/bounddisjunction/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 4] constraints/bounddisjunction/presoltiming = 4 # priority of conflict handler <linear> # [type: int, advanced: TRUE, range: [-2147483648,2147483647], default: -1000000] conflict/linear/priority = -1000000 # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] constraints/linear/sepafreq = 0 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/linear/propfreq = 1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/linear/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: 100] constraints/linear/eagerfreq = 100 # maximal number 
of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] constraints/linear/maxprerounds = -1 # should separation method be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/linear/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/linear/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 20] constraints/linear/presoltiming = 20 # multiplier on propagation frequency, how often the bounds are tightened (-1: never, 0: only at root) # [type: int, advanced: TRUE, range: [-1,65534], default: 1] constraints/linear/tightenboundsfreq = 1 # maximal number of separation rounds per node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 5] constraints/linear/maxrounds = 5 # maximal number of separation rounds per node in the root node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] constraints/linear/maxroundsroot = -1 # maximal number of cuts separated per separation round # [type: int, advanced: FALSE, range: [0,2147483647], default: 50] constraints/linear/maxsepacuts = 50 # maximal number of cuts separated per separation round in the root node # [type: int, advanced: FALSE, range: [0,2147483647], default: 200] constraints/linear/maxsepacutsroot = 200 # should pairwise constraint comparison be performed in presolving? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/presolpairwise = TRUE # should hash table be used for detecting redundant constraints in advance # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/presolusehashing = TRUE # number for minimal pairwise presolve comparisons # [type: int, advanced: TRUE, range: [1,2147483647], default: 200000] constraints/linear/nmincomparisons = 200000 # minimal gain per minimal pairwise presolve comparisons to repeat pairwise comparison round # [type: real, advanced: TRUE, range: [0,1], default: 1e-06] constraints/linear/mingainpernmincomparisons = 1e-06 # maximal allowed relative gain in maximum norm for constraint aggregation (0.0: disable constraint aggregation) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] constraints/linear/maxaggrnormscale = 0 # maximum activity delta to run easy propagation on linear constraint (faster, but numerically less stable) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 1000000] constraints/linear/maxeasyactivitydelta = 1000000 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for separating knapsack cardinality cuts # [type: real, advanced: TRUE, range: [0,1], default: 0] constraints/linear/maxcardbounddist = 0 # should all constraints be subject to cardinality cut generation instead of only the ones with non-zero dual value? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] constraints/linear/separateall = FALSE # should presolving search for aggregations in equations # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/aggregatevariables = TRUE # should presolving try to simplify inequalities # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/simplifyinequalities = TRUE # should dual presolving steps be performed? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/dualpresolving = TRUE # should stuffing of singleton continuous variables be performed? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/singletonstuffing = TRUE # should single variable stuffing be performed, which tries to fulfill constraints using the cheapest variable? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/linear/singlevarstuffing = FALSE # apply binaries sorting in decr. order of coeff abs value? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/sortvars = TRUE # should the violation for a constraint with side 0.0 be checked relative to 1.0 (FALSE) or to the maximum absolute value in the activity (TRUE)? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/linear/checkrelmaxabs = FALSE # should presolving try to detect constraints parallel to the objective function defining an upper bound and prevent these constraints from entering the LP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/detectcutoffbound = TRUE # should presolving try to detect constraints parallel to the objective function defining a lower bound and prevent these constraints from entering the LP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/detectlowerbound = TRUE # should presolving try to detect subsets of constraints parallel to the objective function? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/detectpartialobjective = TRUE # should presolving and propagation try to improve bounds, detect infeasibility, and extract sub-constraints from ranged rows and equations? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/rangedrowpropagation = TRUE # should presolving and propagation extract sub-constraints from ranged rows and equations? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/rangedrowartcons = TRUE # maximum depth to apply ranged row propagation # [type: int, advanced: TRUE, range: [0,2147483647], default: 2147483647] constraints/linear/rangedrowmaxdepth = 2147483647 # frequency for applying ranged row propagation # [type: int, advanced: TRUE, range: [1,65534], default: 1] constraints/linear/rangedrowfreq = 1 # should multi-aggregations only be performed if the constraint can be removed afterwards? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/linear/multaggrremove = FALSE # maximum coefficient dynamism (ie. maxabsval / minabsval) for primal multiaggregation # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 1000] constraints/linear/maxmultaggrquot = 1000 # maximum coefficient dynamism (ie. maxabsval / minabsval) for dual multiaggregation # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 1e+20] constraints/linear/maxdualmultaggrquot = 1e+20 # should Cliques be extracted? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/extractcliques = TRUE # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] constraints/indicator/sepafreq = 10 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/indicator/propfreq = 1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/indicator/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: 100] constraints/indicator/eagerfreq = 100 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] constraints/indicator/maxprerounds = -1 # should separation method be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 4] constraints/indicator/presoltiming = 4 # enable linear upgrading for constraint handler <indicator> # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/upgrade/indicator = TRUE # priority of conflict handler <indicatorconflict> # [type: int, advanced: TRUE, range: [-2147483648,2147483647], default: 200000] conflict/indicatorconflict/priority = 200000 # Branch on indicator constraints in enforcing? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/branchindicators = FALSE # Generate logicor constraints instead of cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/genlogicor = FALSE # Add coupling constraints or rows if big-M is small enough? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/indicator/addcoupling = TRUE # maximum coefficient for binary variable in coupling constraint # [type: real, advanced: TRUE, range: [0,1000000000], default: 10000] constraints/indicator/maxcouplingvalue = 10000 # Add initial variable upper bound constraints, if 'addcoupling' is true? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/addcouplingcons = FALSE # Should the coupling inequalities be separated dynamically? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/indicator/sepacouplingcuts = TRUE # Allow to use local bounds in order to separate coupling inequalities? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/sepacouplinglocal = FALSE # maximum coefficient for binary variable in separated coupling constraint # [type: real, advanced: TRUE, range: [0,1000000000], default: 10000] constraints/indicator/sepacouplingvalue = 10000 # Separate cuts based on perspective formulation? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/sepaperspective = FALSE # Allow to use local bounds in order to separate perspective cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/indicator/sepapersplocal = TRUE # maximal number of separated non violated IISs, before separation is stopped # [type: int, advanced: FALSE, range: [0,2147483647], default: 3] constraints/indicator/maxsepanonviolated = 3 # Update bounds of original variables for separation? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/updatebounds = FALSE # maximum estimated condition of the solution basis matrix of the alternative LP to be trustworthy (0.0 to disable check) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] constraints/indicator/maxconditionaltlp = 0 # maximal number of cuts separated per separation round # [type: int, advanced: FALSE, range: [0,2147483647], default: 100] constraints/indicator/maxsepacuts = 100 # maximal number of cuts separated per separation round in the root node # [type: int, advanced: FALSE, range: [0,2147483647], default: 2000] constraints/indicator/maxsepacutsroot = 2000 # Remove indicator constraint if corresponding variable bound constraint has been added? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/removeindicators = FALSE # Do not generate indicator constraint, but a bilinear constraint instead? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/generatebilinear = FALSE # Scale slack variable coefficient at construction time? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/scaleslackvar = FALSE # Try to make solutions feasible by setting indicator variables? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/indicator/trysolutions = TRUE # In enforcing try to generate cuts (only if sepaalternativelp is true)? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/enforcecuts = FALSE # Should dual reduction steps be performed? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/indicator/dualreductions = TRUE # Add opposite inequality in nodes in which the binary variable has been fixed to 0? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/addopposite = FALSE # Try to upgrade bounddisjunction conflicts by replacing slack variables? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/conflictsupgrade = FALSE # fraction of binary variables that need to be fixed before restart occurs (in forcerestart) # [type: real, advanced: TRUE, range: [0,1], default: 0.9] constraints/indicator/restartfrac = 0.9 # Collect other constraints to alternative LP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/useotherconss = FALSE # Use objective cut with current best solution to alternative LP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/useobjectivecut = FALSE # Try to construct a feasible solution from a cover? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/trysolfromcover = FALSE # Try to upgrade linear constraints to indicator constraints? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/upgradelinear = FALSE # Separate using the alternative LP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/sepaalternativelp = FALSE # Force restart if absolute gap is 1 or enough binary variables have been fixed? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/forcerestart = FALSE # Decompose problem (do not generate linear constraint if all variables are continuous)? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/indicator/nolinconscont = FALSE # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] constraints/integral/sepafreq = -1 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] constraints/integral/propfreq = -1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/integral/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] constraints/integral/eagerfreq = -1 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 0] constraints/integral/maxprerounds = 0 # should separation method be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/integral/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/integral/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 28] constraints/integral/presoltiming = 28 # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/linking/sepafreq = 1 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/linking/propfreq = 1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/linking/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: 100] constraints/linking/eagerfreq = 100 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] constraints/linking/maxprerounds = -1 # should separation method be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/linking/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/linking/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 8] constraints/linking/presoltiming = 8 # this constraint will not propagate or separate, linear and setppc are used? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] constraints/linking/linearize = FALSE # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] constraints/knapsack/sepafreq = 0 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/knapsack/propfreq = 1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/knapsack/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: 100] constraints/knapsack/eagerfreq = 100 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] constraints/knapsack/maxprerounds = -1 # should separation method be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/knapsack/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/knapsack/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 28] constraints/knapsack/presoltiming = 28 # enable linear upgrading for constraint handler <knapsack> # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/upgrade/knapsack = TRUE # multiplier on separation frequency, how often knapsack cuts are separated (-1: never, 0: only at root) # [type: int, advanced: TRUE, range: [-1,65534], default: 1] constraints/knapsack/sepacardfreq = 1 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for separating knapsack cuts # [type: real, advanced: TRUE, range: [0,1], default: 0] constraints/knapsack/maxcardbounddist = 0 # lower clique size limit for greedy clique extraction algorithm (relative to largest clique) # [type: real, advanced: TRUE, range: [0,1], default: 0.5] constraints/knapsack/cliqueextractfactor = 0.5 # maximal number of separation rounds per node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 5] constraints/knapsack/maxrounds = 5 # maximal number of separation rounds per node in the root node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] constraints/knapsack/maxroundsroot = -1 # maximal number of cuts separated per separation round # [type: int, advanced: FALSE, range: [0,2147483647], default: 50] constraints/knapsack/maxsepacuts = 50 # maximal number of cuts separated per separation round in the root node # [type: int, advanced: FALSE, range: [0,2147483647], default: 200] constraints/knapsack/maxsepacutsroot = 200 # should disaggregation of knapsack constraints be allowed in preprocessing? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/knapsack/disaggregation = TRUE # should presolving try to simplify knapsacks # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/knapsack/simplifyinequalities = TRUE # should negated clique information be used in solving process # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/knapsack/negatedclique = TRUE # should pairwise constraint comparison be performed in presolving? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/knapsack/presolpairwise = TRUE # should hash table be used for detecting redundant constraints in advance # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/knapsack/presolusehashing = TRUE # should dual presolving steps be performed? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/knapsack/dualpresolving = TRUE # should GUB information be used for separation? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/knapsack/usegubs = FALSE # should presolving try to detect constraints parallel to the objective function defining an upper bound and prevent these constraints from entering the LP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/knapsack/detectcutoffbound = TRUE # should presolving try to detect constraints parallel to the objective function defining a lower bound and prevent these constraints from entering the LP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/knapsack/detectlowerbound = TRUE # should clique partition information be updated when old partition seems outdated? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/knapsack/updatecliquepartitions = FALSE # factor on the growth of global cliques to decide when to update a previous (negated) clique partition (used only if updatecliquepartitions is set to TRUE) # [type: real, advanced: TRUE, range: [1,10], default: 1.5] constraints/knapsack/clqpartupdatefac = 1.5 # priority of conflict handler <logicor> # [type: int, advanced: TRUE, range: [-2147483648,2147483647], default: 800000] conflict/logicor/priority = 800000 # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] constraints/logicor/sepafreq = 0 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/logicor/propfreq = 1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/logicor/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: 100] constraints/logicor/eagerfreq = 100 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] constraints/logicor/maxprerounds = -1 # should separation method be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/logicor/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/logicor/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 28] constraints/logicor/presoltiming = 28 # enable linear upgrading for constraint handler <logicor> # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/upgrade/logicor = TRUE # should pairwise constraint comparison be performed in presolving? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/logicor/presolpairwise = TRUE # should hash table be used for detecting redundant constraints in advance # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/logicor/presolusehashing = TRUE # should dual presolving steps be performed? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/logicor/dualpresolving = TRUE # should negated clique information be used in presolving # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/logicor/negatedclique = TRUE # should implications/cliques be used in presolving # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/logicor/implications = TRUE # should pairwise constraint comparison try to strengthen constraints by removing superfluous non-zeros? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/logicor/strengthen = TRUE # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] constraints/or/sepafreq = 0 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/or/propfreq = 1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/or/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: 100] constraints/or/eagerfreq = 100 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] constraints/or/maxprerounds = -1 # should separation method be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/or/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/or/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 8] constraints/or/presoltiming = 8 # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] constraints/orbitope/sepafreq = -1 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/orbitope/propfreq = 1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/orbitope/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] constraints/orbitope/eagerfreq = -1 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] constraints/orbitope/maxprerounds = -1 # should separation method be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/orbitope/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/orbitope/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 8] constraints/orbitope/presoltiming = 8 # Strengthen orbitope constraints to packing/partitioning orbitopes? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/orbitope/checkpporbitope = TRUE # Whether we separate inequalities for full orbitopes? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/orbitope/sepafullorbitope = FALSE # Whether orbitope constraints should be forced to be copied to sub SCIPs. # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/orbitope/forceconscopy = FALSE # priority of conflict handler <setppc> # [type: int, advanced: TRUE, range: [-2147483648,2147483647], default: 700000] conflict/setppc/priority = 700000 # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] constraints/setppc/sepafreq = 0 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/setppc/propfreq = 1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/setppc/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: 100] constraints/setppc/eagerfreq = 100 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] constraints/setppc/maxprerounds = -1 # should separation method be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/setppc/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/setppc/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 28] constraints/setppc/presoltiming = 28 # enable linear upgrading for constraint handler <setppc> # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/upgrade/setppc = TRUE # number of children created in pseudo branching (0: disable pseudo branching) # [type: int, advanced: TRUE, range: [0,2147483647], default: 2] constraints/setppc/npseudobranches = 2 # should pairwise constraint comparison be performed in presolving? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/setppc/presolpairwise = TRUE # should hash table be used for detecting redundant constraints in advance # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/setppc/presolusehashing = TRUE # should dual presolving steps be performed? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/setppc/dualpresolving = TRUE # should we try to lift variables into other clique constraints, fix variables, aggregate them, and also shrink the number of variables in clique constraints # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/setppc/cliquelifting = FALSE # should we try to generate extra cliques out of all binary variables to possibly speed up redundant constraint detection # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/setppc/addvariablesascliques = FALSE # should we try to shrink the number of variables in a clique constraint, by replacing more than one variable by only one # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/setppc/cliqueshrinking = TRUE # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] constraints/varbound/sepafreq = 0 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/varbound/propfreq = 1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/varbound/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: 100] constraints/varbound/eagerfreq = 100 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] constraints/varbound/maxprerounds = -1 # should separation method be delayed, if other separators found cuts? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/varbound/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/varbound/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 12] constraints/varbound/presoltiming = 12 # enable linear upgrading for constraint handler <varbound> # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/upgrade/varbound = TRUE # should pairwise constraint comparison be performed in presolving? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/varbound/presolpairwise = TRUE # maximum coefficient in varbound constraint to be added as a row into LP # [type: real, advanced: TRUE, range: [0,1e+20], default: 1000000000] constraints/varbound/maxlpcoef = 1000000000 # should bound widening be used in conflict analysis? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] constraints/varbound/usebdwidening = TRUE # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] constraints/xor/sepafreq = 0 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] constraints/xor/propfreq = 1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/xor/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: 100] constraints/xor/eagerfreq = 100 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] constraints/xor/maxprerounds = -1 # should separation method be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/xor/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/xor/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 28] constraints/xor/presoltiming = 28 # enable linear upgrading for constraint handler <xor> # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] constraints/linear/upgrade/xor = TRUE # should pairwise constraint comparison be performed in presolving? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/xor/presolpairwise = TRUE # should hash table be used for detecting redundant constraints in advance? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] constraints/xor/presolusehashing = TRUE # should the extended formulation be added in presolving? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/xor/addextendedform = FALSE # should the extended flow formulation be added (nonsymmetric formulation otherwise)? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/xor/addflowextended = FALSE # should parity inequalities be separated? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/xor/separateparity = FALSE # frequency for applying the Gauss propagator # [type: int, advanced: TRUE, range: [-1,65534], default: 5] constraints/xor/gausspropfreq = 5 # only use improving bounds # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] reading/bndreader/improveonly = FALSE # should fixed and aggregated variables be printed (if not, re-parsing might fail) # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] reading/cipreader/writefixedvars = TRUE # should an artificial objective, depending on the number of clauses a variable appears in, be used? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] reading/cnfreader/useobj = FALSE # have integer variables no upper bound by default (depending on GAMS version)? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] reading/gmsreader/freeints = FALSE # shall characters '#', '*', '+', '/', and '-' in variable and constraint names be replaced by '_'? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] reading/gmsreader/replaceforbiddenchars = FALSE # default M value for big-M reformulation of indicator constraints in case no bound on slack variable is given # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 1000000] reading/gmsreader/bigmdefault = 1000000 # which reformulation to use for indicator constraints: 'b'ig-M, 's'os1 # [type: char, advanced: FALSE, range: {bs}, default: s] reading/gmsreader/indicatorreform = s # is it allowed to use the gams function signpower(x,a)? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] reading/gmsreader/signpower = FALSE # should possible "and" constraint be linearized when writing the lp file? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] reading/lpreader/linearize-and-constraints = TRUE # should an aggregated linearization for and constraints be used? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] reading/lpreader/aggrlinearization-ands = TRUE # should possible "and" constraint be linearized when writing the mps file? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] reading/mpsreader/linearize-and-constraints = TRUE # should an aggregated linearization for and constraints be used? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] reading/mpsreader/aggrlinearization-ands = TRUE # should model constraints be subject to aging? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] reading/opbreader/dynamicconss = FALSE # use '*' between coefficients and variables by writing to problem? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] reading/opbreader/multisymbol = FALSE # should the output format be binary(P4) (otherwise plain(P1) format) # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] reading/pbmreader/binary = TRUE # maximum number of rows in the scaled picture (-1 for no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1000] reading/pbmreader/maxrows = 1000 # maximum number of columns in the scaled picture (-1 for no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1000] reading/pbmreader/maxcols = 1000 # priority of presolver <boundshift> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 7900000] presolving/boundshift/priority = 7900000 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] presolving/boundshift/maxrounds = 0 # timing mask of presolver <boundshift> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 4] presolving/boundshift/timing = 4 # absolute value of maximum shift # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 9223372036854775807] presolving/boundshift/maxshift = 9223372036854775807 # is flipping allowed (multiplying with -1)? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] presolving/boundshift/flipping = TRUE # shift only integer ranges? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] presolving/boundshift/integer = TRUE # priority of presolver <convertinttobin> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 6000000] presolving/convertinttobin/priority = 6000000 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] presolving/convertinttobin/maxrounds = 0 # timing mask of presolver <convertinttobin> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 4] presolving/convertinttobin/timing = 4 # absolute value of maximum domain size for converting an integer variable to binary variables # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 9223372036854775807] presolving/convertinttobin/maxdomainsize = 9223372036854775807 # should only integer variables with a domain size of 2^p - 1 be converted (then we don't need a knapsack constraint for restricting the sum of the binaries) # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] presolving/convertinttobin/onlypoweroftwo = FALSE # should only integer variables with uplocks equal to downlocks be converted # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] presolving/convertinttobin/samelocksinbothdirections = FALSE # priority of presolver <domcol> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1000] presolving/domcol/priority = -1000 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] presolving/domcol/maxrounds = -1 # timing mask of presolver <domcol> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 16] presolving/domcol/timing = 16 # minimal number of pair comparisons # [type: int, advanced: FALSE, range: [100,1048576], default: 1024] 
presolving/domcol/numminpairs = 1024 # maximal number of pair comparisons # [type: int, advanced: FALSE, range: [1024,1000000000], default: 1048576] presolving/domcol/nummaxpairs = 1048576 # should predictive bound strengthening be applied? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] presolving/domcol/predbndstr = FALSE # should reductions for continuous variables be performed? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] presolving/domcol/continuousred = TRUE # priority of presolver <dualagg> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -12000] presolving/dualagg/priority = -12000 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] presolving/dualagg/maxrounds = 0 # timing mask of presolver <dualagg> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 16] presolving/dualagg/timing = 16 # priority of presolver <dualcomp> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -50] presolving/dualcomp/priority = -50 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] presolving/dualcomp/maxrounds = -1 # timing mask of presolver <dualcomp> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 16] presolving/dualcomp/timing = 16 # should only discrete variables be compensated? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] presolving/dualcomp/componlydisvars = FALSE # priority of presolver <dualinfer> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -3000] presolving/dualinfer/priority = -3000 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] presolving/dualinfer/maxrounds = 0 # timing mask of presolver <dualinfer> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 16] presolving/dualinfer/timing = 16 # use convex combination of columns for determining dual bounds # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] presolving/dualinfer/twocolcombine = TRUE # maximal number of dual bound strengthening loops # [type: int, advanced: FALSE, range: [-1,2147483647], default: 12] presolving/dualinfer/maxdualbndloops = 12 # maximal number of considered non-zeros within one column (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 100] presolving/dualinfer/maxconsiderednonzeros = 100 # maximal number of consecutive useless hashtable retrieves # [type: int, advanced: TRUE, range: [-1,2147483647], default: 1000] presolving/dualinfer/maxretrievefails = 1000 # maximal number of consecutive useless column combines # [type: int, advanced: TRUE, range: [-1,2147483647], default: 1000] presolving/dualinfer/maxcombinefails = 1000 # Maximum number of hashlist entries as multiple of number of columns in the problem (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10] presolving/dualinfer/maxhashfac = 10 # Maximum number of processed column pairs as multiple of the number of columns in the problem (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 1] presolving/dualinfer/maxpairfac = 1 # Maximum number of row's non-zeros for changing inequality to equality # [type: int, 
advanced: FALSE, range: [2,2147483647], default: 3] presolving/dualinfer/maxrowsupport = 3 # priority of presolver <gateextraction> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 1000000] presolving/gateextraction/priority = 1000000 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] presolving/gateextraction/maxrounds = -1 # timing mask of presolver <gateextraction> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 16] presolving/gateextraction/timing = 16 # should we only try to extract set-partitioning constraints and no and-constraints # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] presolving/gateextraction/onlysetpart = FALSE # should we try to extract set-partitioning constraint out of one logicor and one corresponding set-packing constraint # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] presolving/gateextraction/searchequations = TRUE # order logicor constraints to extract big-gates before smaller ones (-1), do not order them (0) or order them to extract smaller gates at first (1) # [type: int, advanced: TRUE, range: [-1,1], default: 1] presolving/gateextraction/sorting = 1 # priority of presolver <implics> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -10000] presolving/implics/priority = -10000 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] presolving/implics/maxrounds = -1 # timing mask of presolver <implics> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 8] presolving/implics/timing = 8 # priority of presolver <inttobinary> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 7000000] presolving/inttobinary/priority = 7000000 # maximal number of presolving 
rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] presolving/inttobinary/maxrounds = -1 # timing mask of presolver <inttobinary> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 4] presolving/inttobinary/timing = 4 # priority of presolver <redvub> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -9000000] presolving/redvub/priority = -9000000 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] presolving/redvub/maxrounds = 0 # timing mask of presolver <redvub> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 16] presolving/redvub/timing = 16 # priority of presolver <trivial> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 9000000] presolving/trivial/priority = 9000000 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] presolving/trivial/maxrounds = -1 # timing mask of presolver <trivial> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 4] presolving/trivial/timing = 4 # priority of presolver <tworowbnd> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -2000] presolving/tworowbnd/priority = -2000 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] presolving/tworowbnd/maxrounds = 0 # timing mask of presolver <tworowbnd> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 16] presolving/tworowbnd/timing = 16 # should tworowbnd presolver be copied to sub-SCIPs? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] presolving/tworowbnd/enablecopy = TRUE # maximal number of considered non-zeros within one row (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 100] presolving/tworowbnd/maxconsiderednonzeros = 100 # maximal number of consecutive useless hashtable retrieves # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1000] presolving/tworowbnd/maxretrievefails = 1000 # maximal number of consecutive useless row combines # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1000] presolving/tworowbnd/maxcombinefails = 1000 # Maximum number of hashlist entries as multiple of number of rows in the problem (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 10] presolving/tworowbnd/maxhashfac = 10 # Maximum number of processed row pairs as multiple of the number of rows in the problem (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1] presolving/tworowbnd/maxpairfac = 1 # priority of presolver <sparsify> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -24000] presolving/sparsify/priority = -24000 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] presolving/sparsify/maxrounds = -1 # timing mask of presolver <sparsify> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 16] presolving/sparsify/timing = 16 # should sparsify presolver be copied to sub-SCIPs? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] presolving/sparsify/enablecopy = TRUE # should we cancel nonzeros in constraints of the linear constraint handler? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] presolving/sparsify/cancellinear = TRUE # should we forbid cancellations that destroy integer coefficients? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] presolving/sparsify/preserveintcoefs = TRUE # maximal fillin for continuous variables (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] presolving/sparsify/maxcontfillin = 0 # maximal fillin for binary variables (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] presolving/sparsify/maxbinfillin = 0 # maximal fillin for integer variables including binaries (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] presolving/sparsify/maxintfillin = 0 # maximal support of one equality to be used for cancelling (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] presolving/sparsify/maxnonzeros = -1 # maximal number of considered non-zeros within one row (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 70] presolving/sparsify/maxconsiderednonzeros = 70 # order in which to process inequalities ('n'o sorting, 'i'ncreasing nonzeros, 'd'ecreasing nonzeros) # [type: char, advanced: TRUE, range: {nid}, default: d] presolving/sparsify/rowsort = d # limit on the number of useless vs. 
useful hashtable retrieves as a multiple of the number of constraints # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 100] presolving/sparsify/maxretrievefac = 100 # number of calls to wait until next execution as a multiple of the number of useless calls # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 2] presolving/sparsify/waitingfac = 2 # priority of presolver <dualsparsify> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -240000] presolving/dualsparsify/priority = -240000 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] presolving/dualsparsify/maxrounds = -1 # timing mask of presolver <dualsparsify> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 16] presolving/dualsparsify/timing = 16 # should dualsparsify presolver be copied to sub-SCIPs? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] presolving/dualsparsify/enablecopy = TRUE # should we forbid cancellations that destroy integer coefficients? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] presolving/dualsparsify/preserveintcoefs = FALSE # should we preserve good locked properties of variables (at most one lock in one direction)? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] presolving/dualsparsify/preservegoodlocks = FALSE # maximal fillin for continuous variables (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1] presolving/dualsparsify/maxcontfillin = 1 # maximal fillin for binary variables (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1] presolving/dualsparsify/maxbinfillin = 1 # maximal fillin for integer variables including binaries (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1] presolving/dualsparsify/maxintfillin = 1 # maximal number of considered nonzeros within one column (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 70] presolving/dualsparsify/maxconsiderednonzeros = 70 # minimal eliminated nonzeros within one column if we need to add a constraint to the problem # [type: int, advanced: FALSE, range: [0,2147483647], default: 100] presolving/dualsparsify/mineliminatednonzeros = 100 # limit on the number of useless vs. 
useful hashtable retrieves as a multiple of the number of constraints # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 100] presolving/dualsparsify/maxretrievefac = 100 # number of calls to wait until next execution as a multiple of the number of useless calls # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 2] presolving/dualsparsify/waitingfac = 2 # priority of presolver <stuffing> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -100] presolving/stuffing/priority = -100 # maximal number of presolving rounds the presolver participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] presolving/stuffing/maxrounds = 0 # timing mask of presolver <stuffing> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 16] presolving/stuffing/timing = 16 # priority of node selection rule <bfs> in standard mode # [type: int, advanced: FALSE, range: [-536870912,1073741823], default: 100000] nodeselection/bfs/stdpriority = 100000 # priority of node selection rule <bfs> in memory saving mode # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 0] nodeselection/bfs/memsavepriority = 0 # minimal plunging depth, before new best node may be selected (-1 for dynamic setting) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] nodeselection/bfs/minplungedepth = -1 # maximal plunging depth, before new best node is forced to be selected (-1 for dynamic setting) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] nodeselection/bfs/maxplungedepth = -1 # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where plunging is performed # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0.25] nodeselection/bfs/maxplungequot = 0.25 # priority of node selection rule <breadthfirst> in standard mode # [type: int, advanced: FALSE, range: [-536870912,1073741823], 
default: -10000] nodeselection/breadthfirst/stdpriority = -10000 # priority of node selection rule <breadthfirst> in memory saving mode # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1000000] nodeselection/breadthfirst/memsavepriority = -1000000 # priority of node selection rule <dfs> in standard mode # [type: int, advanced: FALSE, range: [-536870912,1073741823], default: 0] nodeselection/dfs/stdpriority = 0 # priority of node selection rule <dfs> in memory saving mode # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 100000] nodeselection/dfs/memsavepriority = 100000 # priority of node selection rule <estimate> in standard mode # [type: int, advanced: FALSE, range: [-536870912,1073741823], default: 200000] nodeselection/estimate/stdpriority = 200000 # priority of node selection rule <estimate> in memory saving mode # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 100] nodeselection/estimate/memsavepriority = 100 # minimal plunging depth, before new best node may be selected (-1 for dynamic setting) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] nodeselection/estimate/minplungedepth = -1 # maximal plunging depth, before new best node is forced to be selected (-1 for dynamic setting) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] nodeselection/estimate/maxplungedepth = -1 # maximal quotient (estimate - lowerbound)/(cutoffbound - lowerbound) where plunging is performed # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0.25] nodeselection/estimate/maxplungequot = 0.25 # frequency at which the best node instead of the best estimate is selected (0: never) # [type: int, advanced: FALSE, range: [0,2147483647], default: 10] nodeselection/estimate/bestnodefreq = 10 # depth until breadth-first search is applied # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] nodeselection/estimate/breadthfirstdepth = -1 # number of nodes before 
doing plunging the first time # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] nodeselection/estimate/plungeoffset = 0 # priority of node selection rule <hybridestim> in standard mode # [type: int, advanced: FALSE, range: [-536870912,1073741823], default: 50000] nodeselection/hybridestim/stdpriority = 50000 # priority of node selection rule <hybridestim> in memory saving mode # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 50] nodeselection/hybridestim/memsavepriority = 50 # minimal plunging depth, before new best node may be selected (-1 for dynamic setting) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] nodeselection/hybridestim/minplungedepth = -1 # maximal plunging depth, before new best node is forced to be selected (-1 for dynamic setting) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] nodeselection/hybridestim/maxplungedepth = -1 # maximal quotient (estimate - lowerbound)/(cutoffbound - lowerbound) where plunging is performed # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0.25] nodeselection/hybridestim/maxplungequot = 0.25 # frequency at which the best node instead of the hybrid best estimate / best bound is selected (0: never) # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] nodeselection/hybridestim/bestnodefreq = 1000 # weight of estimate value in node selection score (0: pure best bound search, 1: pure best estimate search) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] nodeselection/hybridestim/estimweight = 0.1 # priority of node selection rule <restartdfs> in standard mode # [type: int, advanced: FALSE, range: [-536870912,1073741823], default: 10000] nodeselection/restartdfs/stdpriority = 10000 # priority of node selection rule <restartdfs> in memory saving mode # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 50000] nodeselection/restartdfs/memsavepriority = 50000 # frequency for selecting the 
best node instead of the deepest one # [type: int, advanced: FALSE, range: [0,2147483647], default: 100] nodeselection/restartdfs/selectbestfreq = 100 # count only leaf nodes (otherwise all nodes)? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] nodeselection/restartdfs/countonlyleaves = TRUE # priority of node selection rule <uct> in standard mode # [type: int, advanced: FALSE, range: [-536870912,1073741823], default: 10] nodeselection/uct/stdpriority = 10 # priority of node selection rule <uct> in memory saving mode # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 0] nodeselection/uct/memsavepriority = 0 # maximum number of nodes before switching to default rule # [type: int, advanced: TRUE, range: [0,1000000], default: 31] nodeselection/uct/nodelimit = 31 # weight for visit quotient of node selection rule # [type: real, advanced: TRUE, range: [0,1], default: 0.1] nodeselection/uct/weight = 0.1 # should the estimate (TRUE) or lower bound of a node be used for UCT score? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] nodeselection/uct/useestimate = FALSE # display activation status of display column <nrank1nodes> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 0] display/nrank1nodes/active = 0 # display activation status of display column <nnodesbelowinc> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 0] display/nnodesbelowinc/active = 0 # should the event handler adapt the solver behavior? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] solvingphases/enabled = FALSE # should the event handler test all phase transitions? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] solvingphases/testmode = FALSE # settings file for feasibility phase -- precedence over emphasis settings # [type: string, advanced: FALSE, default: "-"] solvingphases/feassetname = "-" # settings file for improvement phase -- precedence over emphasis settings # [type: string, advanced: FALSE, default: "-"] solvingphases/improvesetname = "-" # settings file for proof phase -- precedence over emphasis settings # [type: string, advanced: FALSE, default: "-"] solvingphases/proofsetname = "-" # node offset for rank-1 and estimate transitions # [type: longint, advanced: FALSE, range: [1,9223372036854775807], default: 50] solvingphases/nodeoffset = 50 # should the event handler fall back from optimal phase? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] solvingphases/fallback = FALSE # transition method: Possible options are 'e'stimate,'l'ogarithmic regression,'o'ptimal-value based,'r'ank-1 # [type: char, advanced: FALSE, range: {elor}, default: r] solvingphases/transitionmethod = r # should the event handler interrupt the solving process after optimal solution was found? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] solvingphases/interruptoptimal = FALSE # should a restart be applied between the feasibility and improvement phase? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] solvingphases/userestart1to2 = FALSE # should a restart be applied between the improvement and the proof phase? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] solvingphases/userestart2to3 = FALSE # optimal solution value for problem # [type: real, advanced: FALSE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 1e+99] solvingphases/optimalvalue = 1e+99 # x-type for logarithmic regression - (t)ime, (n)odes, (l)p iterations # [type: char, advanced: FALSE, range: {lnt}, default: n] solvingphases/xtype = n # should emphasis settings for the solving phases be used, or settings files? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] solvingphases/useemphsettings = TRUE # priority of propagator <dualfix> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 8000000] propagating/dualfix/priority = 8000000 # frequency for calling propagator <dualfix> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] propagating/dualfix/freq = 0 # should propagator be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/dualfix/delay = FALSE # timing when propagator should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS)) # [type: int, advanced: TRUE, range: [1,15], default: 1] propagating/dualfix/timingmask = 1 # presolving priority of propagator <dualfix> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 8000000] propagating/dualfix/presolpriority = 8000000 # maximal number of presolving rounds the propagator participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] propagating/dualfix/maxprerounds = -1 # timing mask of the presolving method of propagator <dualfix> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [2,60], default: 4] propagating/dualfix/presoltiming = 4 # priority of propagator <genvbounds> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 3000000] propagating/genvbounds/priority = 3000000 # frequency for calling propagator <genvbounds> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] propagating/genvbounds/freq = 1 # should propagator be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/genvbounds/delay = FALSE # timing when propagator should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS)) # [type: int, advanced: TRUE, range: [1,15], default: 15] propagating/genvbounds/timingmask = 15 # presolving priority of propagator <genvbounds> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -2000000] propagating/genvbounds/presolpriority = -2000000 # maximal number of presolving rounds the propagator participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] propagating/genvbounds/maxprerounds = -1 # timing mask of the presolving method of propagator <genvbounds> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [2,60], default: 4] propagating/genvbounds/presoltiming = 4 # apply global propagation? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/genvbounds/global = TRUE # apply genvbounds in root node if no new incumbent was found? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/genvbounds/propinrootnode = TRUE # sort genvbounds and wait for bound change events? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/genvbounds/sort = TRUE # should genvbounds be transformed to (linear) constraints? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/genvbounds/propasconss = FALSE # priority of propagator <obbt> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1000000] propagating/obbt/priority = -1000000 # frequency for calling propagator <obbt> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] propagating/obbt/freq = 0 # should propagator be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/obbt/delay = TRUE # timing when propagator should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS)) # [type: int, advanced: TRUE, range: [1,15], default: 4] propagating/obbt/timingmask = 4 # presolving priority of propagator <obbt> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 0] propagating/obbt/presolpriority = 0 # maximal number of presolving rounds the propagator participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] propagating/obbt/maxprerounds = -1 # timing mask of the presolving method of propagator <obbt> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [2,60], default: 28] propagating/obbt/presoltiming = 28 # should obbt try to provide genvbounds if possible? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/obbt/creategenvbounds = TRUE # should coefficients in filtering be normalized w.r.t. the domains sizes? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/obbt/normalize = TRUE # try to filter bounds in so-called filter rounds by solving auxiliary LPs? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/obbt/applyfilterrounds = FALSE # try to filter bounds with the LP solution after each solve? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/obbt/applytrivialfilter = TRUE # should we try to generate genvbounds during trivial and aggressive filtering? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/obbt/genvbdsduringfilter = TRUE # try to create genvbounds during separation process? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/obbt/genvbdsduringsepa = TRUE # minimal number of filtered bounds to apply another filter round # [type: int, advanced: TRUE, range: [1,2147483647], default: 2] propagating/obbt/minfilter = 2 # multiple of root node LP iterations used as total LP iteration limit for obbt (<= 0: no limit ) # [type: real, advanced: FALSE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 10] propagating/obbt/itlimitfactor = 10 # multiple of OBBT LP limit used as total LP iteration limit for solving bilinear inequality LPs (< 0 for no limit) # [type: real, advanced: FALSE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 3] propagating/obbt/itlimitfactorbilin = 3 # minimum absolute value of nonconvex eigenvalues for a bilinear term # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.1] propagating/obbt/minnonconvexity = 0.1 # minimum LP iteration limit # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 5000] propagating/obbt/minitlimit = 5000 # feasibility tolerance for reduced costs used in obbt; this value is used if SCIP's dual feastol is greater # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 1e-09] propagating/obbt/dualfeastol = 1e-09 # maximum condition limit used in LP solver (-1.0: no limit) # [type: real, advanced: FALSE, range: [-1,1.79769313486232e+308], default: -1] propagating/obbt/conditionlimit = -1 # minimal relative improve for strengthening bounds # [type: real, advanced: FALSE, range: [0,1], default: 0.001] propagating/obbt/boundstreps = 0.001 # only apply obbt on non-convex variables # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/obbt/onlynonconvexvars = TRUE # should integral bounds be tightened during the probing mode? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/obbt/tightintboundsprobing = TRUE # should continuous bounds be tightened during the probing mode? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/obbt/tightcontboundsprobing = FALSE # solve auxiliary LPs in order to find valid inequalities for bilinear terms? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/obbt/createbilinineqs = TRUE # create linear constraints from inequalities for bilinear terms? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/obbt/createlincons = FALSE # select the type of ordering algorithm which should be used (0: no special ordering, 1: greedy, 2: greedy reverse) # [type: int, advanced: TRUE, range: [0,2], default: 1] propagating/obbt/orderingalgo = 1 # should the obbt LP solution be separated? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/obbt/separatesol = FALSE # minimum number of iterations spent to separate an obbt LP solution # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] propagating/obbt/sepaminiter = 0 # maximum number of iterations spent to separate an obbt LP solution # [type: int, advanced: TRUE, range: [0,2147483647], default: 10] propagating/obbt/sepamaxiter = 10 # trigger a propagation round after that many bound tightenings (0: no propagation) # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] propagating/obbt/propagatefreq = 0 # priority of propagator <nlobbt> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1100000] propagating/nlobbt/priority = -1100000 # frequency for calling propagator <nlobbt> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] propagating/nlobbt/freq = -1 # should propagator be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/nlobbt/delay = TRUE # timing when propagator should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS)) # [type: int, advanced: TRUE, range: [1,15], default: 4] propagating/nlobbt/timingmask = 4 # presolving priority of propagator <nlobbt> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 0] propagating/nlobbt/presolpriority = 0 # maximal number of presolving rounds the propagator participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] propagating/nlobbt/maxprerounds = -1 # timing mask of the presolving method of propagator <nlobbt> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [2,60], default: 28] propagating/nlobbt/presoltiming = 28 # factor for NLP feasibility tolerance # [type: real, advanced: TRUE, range: [0,1], default: 0.01] propagating/nlobbt/feastolfac = 0.01 # factor for NLP relative objective tolerance # [type: real, advanced: TRUE, range: [0,1], default: 0.01] propagating/nlobbt/relobjtolfac = 0.01 # (#convex nlrows)/(#nonconvex nlrows) threshold to apply propagator # [type: real, advanced: TRUE, range: [0,1e+20], default: 0.2] propagating/nlobbt/minnonconvexfrac = 0.2 # minimum (#convex nlrows)/(#linear nlrows) threshold to apply propagator # [type: real, advanced: TRUE, range: [0,1e+20], default: 0.02] propagating/nlobbt/minlinearfrac = 0.02 # should non-initial LP rows be used? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] propagating/nlobbt/addlprows = TRUE # iteration limit of NLP solver; 0 for no limit # [type: int, advanced: TRUE, range: [0,2147483647], default: 500] propagating/nlobbt/nlpiterlimit = 500 # time limit of NLP solver; 0.0 for no limit # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] propagating/nlobbt/nlptimelimit = 0 # verbosity level of NLP solver # [type: int, advanced: TRUE, range: [0,5], default: 0] propagating/nlobbt/nlpverblevel = 0 # LP iteration limit for nlobbt will be this factor times total LP iterations in root node # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 2] propagating/nlobbt/itlimitfactor = 2 # priority of propagator <probing> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -100000] propagating/probing/priority = -100000 # frequency for calling propagator <probing> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] propagating/probing/freq = -1 # should propagator be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/probing/delay = TRUE # timing when propagator should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS)) # [type: int, advanced: TRUE, range: [1,15], default: 4] propagating/probing/timingmask = 4 # presolving priority of propagator <probing> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -100000] propagating/probing/presolpriority = -100000 # maximal number of presolving rounds the propagator participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] propagating/probing/maxprerounds = -1 # timing mask of the presolving method of propagator <probing> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [2,60], default: 16] propagating/probing/presoltiming = 16 # maximal number of runs, probing participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1] propagating/probing/maxruns = 1 # maximal number of propagation rounds in probing subproblems (-1: no limit, 0: auto) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] propagating/probing/proprounds = -1 # maximal number of fixings found, until probing is interrupted (0: don't iterrupt) # [type: int, advanced: TRUE, range: [0,2147483647], default: 25] propagating/probing/maxfixings = 25 # maximal number of successive probings without fixings, until probing is aborted (0: don't abort) # [type: int, advanced: TRUE, range: [0,2147483647], default: 1000] propagating/probing/maxuseless = 1000 # maximal number of successive probings without fixings, bound changes, and implications, until probing is aborted (0: don't abort) # [type: int, advanced: TRUE, range: [0,2147483647], default: 50] propagating/probing/maxtotaluseless = 50 # maximal number of probings without fixings, until probing is aborted (0: don't abort) # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] 
propagating/probing/maxsumuseless = 0 # maximal depth until propagation is executed(-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] propagating/probing/maxdepth = -1 # priority of propagator <pseudoobj> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 3000000] propagating/pseudoobj/priority = 3000000 # frequency for calling propagator <pseudoobj> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] propagating/pseudoobj/freq = 1 # should propagator be delayed, if other propagators found reductions? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/pseudoobj/delay = FALSE # timing when propagator should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS)) # [type: int, advanced: TRUE, range: [1,15], default: 7] propagating/pseudoobj/timingmask = 7 # presolving priority of propagator <pseudoobj> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 6000000] propagating/pseudoobj/presolpriority = 6000000 # maximal number of presolving rounds the propagator participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] propagating/pseudoobj/maxprerounds = -1 # timing mask of the presolving method of propagator <pseudoobj> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [2,60], default: 4] propagating/pseudoobj/presoltiming = 4 # minimal number of successive non-binary variable propagations without a bound reduction before aborted # [type: int, advanced: TRUE, range: [0,2147483647], default: 100] propagating/pseudoobj/minuseless = 100 # maximal fraction of non-binary variables with non-zero objective without a bound reduction before aborted # [type: real, advanced: TRUE, range: [0,1], default: 0.1] propagating/pseudoobj/maxvarsfrac = 0.1 # whether to propagate all non-binary variables when we are propagating the root node # [type: bool, advanced: 
TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/pseudoobj/propfullinroot = TRUE # propagate new cutoff bound directly globally # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/pseudoobj/propcutoffbound = TRUE # should the propagator be forced even if active pricer are present? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/pseudoobj/force = FALSE # number of variables added after the propagator is reinitialized? # [type: int, advanced: TRUE, range: [0,2147483647], default: 1000] propagating/pseudoobj/maxnewvars = 1000 # use implications to strengthen the propagation of binary variable (increasing the objective change)? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/pseudoobj/propuseimplics = TRUE # use implications to strengthen the resolve propagation of binary variable (increasing the objective change)? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/pseudoobj/respropuseimplics = TRUE # maximum number of binary variables the implications are used if turned on (-1: unlimited)? # [type: int, advanced: TRUE, range: [-1,2147483647], default: 50000] propagating/pseudoobj/maximplvars = 50000 # priority of propagator <redcost> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 1000000] propagating/redcost/priority = 1000000 # frequency for calling propagator <redcost> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] propagating/redcost/freq = 1 # should propagator be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/redcost/delay = FALSE # timing when propagator should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS)) # [type: int, advanced: TRUE, range: [1,15], default: 6] propagating/redcost/timingmask = 6 # presolving priority of propagator <redcost> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 0] propagating/redcost/presolpriority = 0 # maximal number of presolving rounds the propagator participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] propagating/redcost/maxprerounds = -1 # timing mask of the presolving method of propagator <redcost> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [2,60], default: 28] propagating/redcost/presoltiming = 28 # should reduced cost fixing be also applied to continuous variables? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] propagating/redcost/continuous = FALSE # should implications be used to strength the reduced cost for binary variables? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] propagating/redcost/useimplics = FALSE # should the propagator be forced even if active pricer are present? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/redcost/force = FALSE # priority of propagator <rootredcost> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 10000000] propagating/rootredcost/priority = 10000000 # frequency for calling propagator <rootredcost> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] propagating/rootredcost/freq = 1 # should propagator be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/rootredcost/delay = FALSE # timing when propagator should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS)) # [type: int, advanced: TRUE, range: [1,15], default: 5] propagating/rootredcost/timingmask = 5 # presolving priority of propagator <rootredcost> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 0] propagating/rootredcost/presolpriority = 0 # maximal number of presolving rounds the propagator participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] propagating/rootredcost/maxprerounds = -1 # timing mask of the presolving method of propagator <rootredcost> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [2,60], default: 28] propagating/rootredcost/presoltiming = 28 # should only binary variables be propagated? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/rootredcost/onlybinary = FALSE # should the propagator be forced even if active pricer are present? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/rootredcost/force = FALSE # priority of propagator <vbounds> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 3000000] propagating/vbounds/priority = 3000000 # frequency for calling propagator <vbounds> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] propagating/vbounds/freq = 1 # should propagator be delayed, if other propagators found reductions? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/vbounds/delay = FALSE # timing when propagator should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS)) # [type: int, advanced: TRUE, range: [1,15], default: 5] propagating/vbounds/timingmask = 5 # presolving priority of propagator <vbounds> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -90000] propagating/vbounds/presolpriority = -90000 # maximal number of presolving rounds the propagator participates in (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] propagating/vbounds/maxprerounds = -1 # timing mask of the presolving method of propagator <vbounds> (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [2,60], default: 24] propagating/vbounds/presoltiming = 24 # should bound widening be used to initialize conflict analysis? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] propagating/vbounds/usebdwidening = TRUE # should implications be propagated? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/vbounds/useimplics = FALSE # should cliques be propagated? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/vbounds/usecliques = FALSE # should vbounds be propagated? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] propagating/vbounds/usevbounds = TRUE # should the bounds be topologically sorted in advance? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] propagating/vbounds/dotoposort = TRUE # should cliques be regarded for the topological sort? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] propagating/vbounds/sortcliques = FALSE # should cycles in the variable bound graph be identified? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] propagating/vbounds/detectcycles = FALSE # minimum percentage of new cliques to trigger another clique table analysis # [type: real, advanced: FALSE, range: [0,1], default: 0.1] propagating/vbounds/minnewcliques = 0.1 # maximum number of cliques per variable to run clique table analysis in medium presolving # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 50] propagating/vbounds/maxcliquesmedium = 50 # maximum number of cliques per variable to run clique table analysis in exhaustive presolving # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 100] propagating/vbounds/maxcliquesexhaustive = 100 # priority of heuristic <actconsdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1003700] heuristics/actconsdiving/priority = -1003700 # frequency for calling primal heuristic <actconsdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/actconsdiving/freq = -1 # frequency offset for calling primal heuristic <actconsdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 5] heuristics/actconsdiving/freqofs = 5 # maximal depth level to call primal heuristic <actconsdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/actconsdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/actconsdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/actconsdiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/actconsdiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], 
default: 1000] heuristics/actconsdiving/maxlpiterofs = 1000 # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/actconsdiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/actconsdiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/actconsdiving/maxdiveubquotnosol = 1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 1] heuristics/actconsdiving/maxdiveavgquotnosol = 1 # use one level of backtracking if infeasibility is encountered? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/actconsdiving/backtrack = TRUE # percentage of immediate domain changes during probing to trigger LP resolve # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.15] heuristics/actconsdiving/lpresolvedomchgquot = 0.15 # LP solve frequency for diving heuristics (0: only after enough domain changes have been found) # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] heuristics/actconsdiving/lpsolvefreq = 0 # should only LP branching candidates be considered instead of the slower but more general constraint handler diving variable selection? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/actconsdiving/onlylpbranchcands = TRUE # priority of heuristic <adaptivediving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -70000] heuristics/adaptivediving/priority = -70000 # frequency for calling primal heuristic <adaptivediving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 5] heuristics/adaptivediving/freq = 5 # frequency offset for calling primal heuristic <adaptivediving> # [type: int, advanced: FALSE, range: [0,65534], default: 3] heuristics/adaptivediving/freqofs = 3 # maximal depth level to call primal heuristic <adaptivediving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/adaptivediving/maxdepth = -1 # parameter that increases probability of exploration among divesets (only active if seltype is 'e') # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 1] heuristics/adaptivediving/epsilon = 1 # score parameter for selection: minimize either average 'n'odes, LP 'i'terations,backtrack/'c'onflict ratio, 'd'epth, 1 / 's'olutions, or 1 / solutions'u'ccess # [type: char, advanced: FALSE, range: {cdinsu}, default: c] heuristics/adaptivediving/scoretype = c # selection strategy: (e)psilon-greedy, (w)eighted distribution, (n)ext diving # [type: char, advanced: FALSE, range: {enw}, default: w] heuristics/adaptivediving/seltype = w # should the heuristic use its own statistics, or shared statistics? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/adaptivediving/useadaptivecontext = FALSE # coefficient c to decrease initial confidence (calls + 1.0) / (calls + c) in scores # [type: real, advanced: FALSE, range: [1,2147483647], default: 10] heuristics/adaptivediving/selconfidencecoeff = 10 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.1] heuristics/adaptivediving/maxlpiterquot = 0.1 # additional number of allowed LP iterations # [type: longint, advanced: FALSE, range: [0,2147483647], default: 1500] heuristics/adaptivediving/maxlpiterofs = 1500 # weight of incumbent solutions compared to other solutions in computation of LP iteration limit # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 10] heuristics/adaptivediving/bestsolweight = 10 # priority of heuristic <bound> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1107000] heuristics/bound/priority = -1107000 # frequency for calling primal heuristic <bound> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/bound/freq = -1 # frequency offset for calling primal heuristic <bound> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/bound/freqofs = 0 # maximal depth level to call primal heuristic <bound> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/bound/maxdepth = -1 # Should heuristic only be executed if no primal solution was found, yet? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/bound/onlywithoutsol = TRUE # maximum number of propagation rounds during probing (-1 infinity, -2 parameter settings) # [type: int, advanced: TRUE, range: [-1,536870911], default: 0] heuristics/bound/maxproprounds = 0 # to which bound should integer variables be fixed? 
('l'ower, 'u'pper, or 'b'oth) # [type: char, advanced: FALSE, range: {lub}, default: l] heuristics/bound/bound = l # priority of heuristic <clique> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 5000] heuristics/clique/priority = 5000 # frequency for calling primal heuristic <clique> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/clique/freq = 0 # frequency offset for calling primal heuristic <clique> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/clique/freqofs = 0 # maximal depth level to call primal heuristic <clique> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/clique/maxdepth = -1 # minimum percentage of integer variables that have to be fixable # [type: real, advanced: FALSE, range: [0,1], default: 0.65] heuristics/clique/minintfixingrate = 0.65 # minimum percentage of fixed variables in the sub-MIP # [type: real, advanced: FALSE, range: [0,1], default: 0.65] heuristics/clique/minmipfixingrate = 0.65 # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 5000] heuristics/clique/maxnodes = 5000 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 500] heuristics/clique/nodesofs = 500 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 500] heuristics/clique/minnodes = 500 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/clique/nodesquot = 0.1 # factor by which clique heuristic should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/clique/minimprove = 0.01 # maximum number of propagation rounds during 
probing (-1 infinity) # [type: int, advanced: TRUE, range: [-1,536870911], default: 2] heuristics/clique/maxproprounds = 2 # should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/clique/copycuts = TRUE # should more variables be fixed based on variable locks if the fixing rate was not reached? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/clique/uselockfixings = FALSE # maximum number of backtracks during the fixing process # [type: int, advanced: TRUE, range: [-1,536870911], default: 10] heuristics/clique/maxbacktracks = 10 # priority of heuristic <coefdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1001000] heuristics/coefdiving/priority = -1001000 # frequency for calling primal heuristic <coefdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/coefdiving/freq = -1 # frequency offset for calling primal heuristic <coefdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 1] heuristics/coefdiving/freqofs = 1 # maximal depth level to call primal heuristic <coefdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/coefdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/coefdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/coefdiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/coefdiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/coefdiving/maxlpiterofs = 1000 # maximal quotient (curlowerbound 
- lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/coefdiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/coefdiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/coefdiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/coefdiving/maxdiveavgquotnosol = 0 # use one level of backtracking if infeasibility is encountered? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/coefdiving/backtrack = TRUE # percentage of immediate domain changes during probing to trigger LP resolve # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.15] heuristics/coefdiving/lpresolvedomchgquot = 0.15 # LP solve frequency for diving heuristics (0: only after enough domain changes have been found) # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] heuristics/coefdiving/lpsolvefreq = 0 # should only LP branching candidates be considered instead of the slower but more general constraint handler diving variable selection? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/coefdiving/onlylpbranchcands = FALSE # priority of heuristic <completesol> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 0] heuristics/completesol/priority = 0 # frequency for calling primal heuristic <completesol> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/completesol/freq = 0 # frequency offset for calling primal heuristic <completesol> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/completesol/freqofs = 0 # maximal depth level to call primal heuristic <completesol> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: 0] heuristics/completesol/maxdepth = 0 # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 5000] heuristics/completesol/maxnodes = 5000 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 50] heuristics/completesol/minnodes = 50 # maximal rate of unknown solution values # [type: real, advanced: FALSE, range: [0,1], default: 0.85] heuristics/completesol/maxunknownrate = 0.85 # should all subproblem solutions be added to the original SCIP? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/completesol/addallsols = FALSE # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 500] heuristics/completesol/nodesofs = 500 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/completesol/nodesquot = 0.1 # factor by which the limit on the number of LP depends on the node limit # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 2] heuristics/completesol/lplimfac = 2 # weight of the original objective function (1: only original objective) # [type: real, advanced: TRUE, range: [0.001,1], default: 1] heuristics/completesol/objweight = 1 # bound widening factor applied to continuous variables (0: fix variables to given solution values, 1: relax to global bounds) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/completesol/boundwidening = 0.1 # factor by which the incumbent should be improved at least # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/completesol/minimprove = 0.01 # should number of continuous variables be ignored? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/completesol/ignorecont = FALSE # heuristic stops, if the given number of improving solutions were found (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 5] heuristics/completesol/solutions = 5 # maximal number of iterations in propagation (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 10] heuristics/completesol/maxproprounds = 10 # should the heuristic run before presolving? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/completesol/beforepresol = TRUE # maximal number of LP iterations (-1: no limit) # [type: longint, advanced: FALSE, range: [-1,9223372036854775807], default: -1] heuristics/completesol/maxlpiter = -1 # maximal number of continuous variables after presolving # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] heuristics/completesol/maxcontvars = -1 # priority of heuristic <conflictdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1000100] heuristics/conflictdiving/priority = -1000100 # frequency for calling primal heuristic <conflictdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/conflictdiving/freq = 10 # frequency offset for calling primal heuristic <conflictdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/conflictdiving/freqofs = 0 # maximal depth level to call primal heuristic <conflictdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/conflictdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/conflictdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/conflictdiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.15] heuristics/conflictdiving/maxlpiterquot = 0.15 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/conflictdiving/maxlpiterofs = 1000 # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] 
heuristics/conflictdiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/conflictdiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/conflictdiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/conflictdiving/maxdiveavgquotnosol = 0 # use one level of backtracking if infeasibility is encountered? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/conflictdiving/backtrack = TRUE # percentage of immediate domain changes during probing to trigger LP resolve # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.15] heuristics/conflictdiving/lpresolvedomchgquot = 0.15 # LP solve frequency for diving heuristics (0: only after enough domain changes have been found) # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] heuristics/conflictdiving/lpsolvefreq = 0 # should only LP branching candidates be considered instead of the slower but more general constraint handler diving variable selection? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/conflictdiving/onlylpbranchcands = FALSE # try to maximize the violation # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/conflictdiving/maxviol = TRUE # perform rounding like coefficient diving # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/conflictdiving/likecoef = FALSE # minimal number of conflict locks per variable # [type: int, advanced: TRUE, range: [0,2147483647], default: 5] heuristics/conflictdiving/minconflictlocks = 5 # weight used in a convex combination of conflict and variable locks # [type: real, advanced: TRUE, range: [0,1], default: 0.75] heuristics/conflictdiving/lockweight = 0.75 # priority of heuristic <crossover> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1104000] heuristics/crossover/priority = -1104000 # frequency for calling primal heuristic <crossover> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 30] heuristics/crossover/freq = 30 # frequency offset for calling primal heuristic <crossover> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/crossover/freqofs = 0 # maximal depth level to call primal heuristic <crossover> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/crossover/maxdepth = -1 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 500] heuristics/crossover/nodesofs = 500 # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 5000] heuristics/crossover/maxnodes = 5000 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 50] heuristics/crossover/minnodes = 50 # number of solutions to be taken into account # [type: int, 
advanced: FALSE, range: [2,2147483647], default: 3] heuristics/crossover/nusedsols = 3 # number of nodes without incumbent change that heuristic should wait # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 200] heuristics/crossover/nwaitingnodes = 200 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/crossover/nodesquot = 0.1 # minimum percentage of integer variables that have to be fixed # [type: real, advanced: FALSE, range: [0,1], default: 0.666] heuristics/crossover/minfixingrate = 0.666 # factor by which Crossover should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/crossover/minimprove = 0.01 # factor by which the limit on the number of LP depends on the node limit # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 2] heuristics/crossover/lplimfac = 2 # should the choice which sols to take be randomized? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/crossover/randomization = TRUE # should the nwaitingnodes parameter be ignored at the root node? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/crossover/dontwaitatroot = FALSE # should subproblem be created out of the rows in the LP rows? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/crossover/uselprows = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/crossover/copycuts = TRUE # should the subproblem be permuted to increase diversification? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/crossover/permute = FALSE # limit on number of improving incumbent solutions in sub-CIP # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] heuristics/crossover/bestsollimit = -1 # should uct node selection be used at the beginning of the search? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/crossover/useuct = FALSE # priority of heuristic <dins> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1105000] heuristics/dins/priority = -1105000 # frequency for calling primal heuristic <dins> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/dins/freq = -1 # frequency offset for calling primal heuristic <dins> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/dins/freqofs = 0 # maximal depth level to call primal heuristic <dins> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/dins/maxdepth = -1 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 5000] heuristics/dins/nodesofs = 5000 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.05] heuristics/dins/nodesquot = 0.05 # minimum number of nodes required to start the subproblem # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 50] heuristics/dins/minnodes = 50 # number of pool-solutions to be checked for flag array update (for hard fixing of binary variables) # [type: int, advanced: FALSE, range: [1,2147483647], default: 5] heuristics/dins/solnum = 5 # radius (using Manhattan metric) of the incumbent's neighborhood to be searched # [type: int, advanced: FALSE, range: [1,2147483647], default: 18] heuristics/dins/neighborhoodsize = 18 # maximum 
number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 5000] heuristics/dins/maxnodes = 5000 # factor by which dins should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/dins/minimprove = 0.01 # number of nodes without incumbent change that heuristic should wait # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 200] heuristics/dins/nwaitingnodes = 200 # factor by which the limit on the number of LP depends on the node limit # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 1.5] heuristics/dins/lplimfac = 1.5 # minimum percentage of integer variables that have to be fixable # [type: real, advanced: FALSE, range: [0,1], default: 0.3] heuristics/dins/minfixingrate = 0.3 # should subproblem be created out of the rows in the LP rows? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/dins/uselprows = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/dins/copycuts = TRUE # should uct node selection be used at the beginning of the search? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/dins/useuct = FALSE # limit on number of improving incumbent solutions in sub-CIP # [type: int, advanced: FALSE, range: [-1,2147483647], default: 3] heuristics/dins/bestsollimit = 3 # priority of heuristic <distributiondiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1003300] heuristics/distributiondiving/priority = -1003300 # frequency for calling primal heuristic <distributiondiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/distributiondiving/freq = 10 # frequency offset for calling primal heuristic <distributiondiving> # [type: int, advanced: FALSE, range: [0,65534], default: 3] heuristics/distributiondiving/freqofs = 3 # maximal depth level to call primal heuristic <distributiondiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/distributiondiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/distributiondiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/distributiondiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/distributiondiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/distributiondiving/maxlpiterofs = 1000 # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/distributiondiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no 
limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/distributiondiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/distributiondiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/distributiondiving/maxdiveavgquotnosol = 0 # use one level of backtracking if infeasibility is encountered? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/distributiondiving/backtrack = TRUE # percentage of immediate domain changes during probing to trigger LP resolve # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.15] heuristics/distributiondiving/lpresolvedomchgquot = 0.15 # LP solve frequency for diving heuristics (0: only after enough domain changes have been found) # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] heuristics/distributiondiving/lpsolvefreq = 0 # should only LP branching candidates be considered instead of the slower but more general constraint handler diving variable selection? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/distributiondiving/onlylpbranchcands = TRUE # the score; largest 'd'ifference, 'l'owest cumulative probability, 'h'ighest c.p., 'v'otes lowest c.p., votes highest c.p. ('w'), 'r'evolving # [type: char, advanced: TRUE, range: {lvdhwr}, default: r] heuristics/distributiondiving/scoreparam = r # priority of heuristic <dps> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 75000] heuristics/dps/priority = 75000 # frequency for calling primal heuristic <dps> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/dps/freq = -1 # frequency offset for calling primal heuristic <dps> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/dps/freqofs = 0 # maximal depth level to call primal heuristic <dps> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/dps/maxdepth = -1 # maximal number of iterations # [type: int, advanced: FALSE, range: [1,2147483647], default: 50] heuristics/dps/maxiterations = 50 # maximal linking score of used decomposition (equivalent to percentage of linking constraints) # [type: real, advanced: FALSE, range: [0,1], default: 1] heuristics/dps/maxlinkscore = 1 # multiplier for absolute increase of penalty parameters (0: no increase) # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 100] heuristics/dps/penalty = 100 # should the problem get reoptimized with the original objective function? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/dps/reoptimize = FALSE # should solutions get reused in subproblems? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/dps/reuse = FALSE # priority of heuristic <dualval> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 0] heuristics/dualval/priority = 0 # frequency for calling primal heuristic <dualval> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/dualval/freq = -1 # frequency offset for calling primal heuristic <dualval> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/dualval/freqofs = 0 # maximal depth level to call primal heuristic <dualval> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/dualval/maxdepth = -1 # exit if objective doesn't improve # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/dualval/forceimprovements = FALSE # add constraint to ensure that discrete vars are improving # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/dualval/onlycheaper = TRUE # disable the heuristic if it was not called at a leaf of the B&B tree # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/dualval/onlyleaves = FALSE # relax the indicator variables by introducing continuous copies # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/dualval/relaxindicators = FALSE # relax the continuous variables # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/dualval/relaxcontvars = FALSE # verblevel of the heuristic, default is 0 to display nothing # [type: int, advanced: FALSE, range: [0,4], default: 0] heuristics/dualval/heurverblevel = 0 # verblevel of the nlp solver, can be 0 or 1 # [type: int, advanced: FALSE, range: [0,1], default: 0] heuristics/dualval/nlpverblevel = 0 # number of ranks that should be displayed when the heuristic is called # [type: int, advanced: FALSE, range: [0,2147483647], default: 10] 
heuristics/dualval/rankvalue = 10 # maximal number of recursive calls of the heuristic (if dynamicdepth is off) # [type: int, advanced: FALSE, range: [0,2147483647], default: 25] heuristics/dualval/maxcalls = 25 # says if and how the recursion depth is computed at runtime # [type: int, advanced: FALSE, range: [0,1], default: 0] heuristics/dualval/dynamicdepth = 0 # maximal number of variables that may have maximal rank, quit if there are more, turn off by setting -1 # [type: int, advanced: FALSE, range: [-1,2147483647], default: 50] heuristics/dualval/maxequalranks = 50 # minimal gap for which we still run the heuristic, if gap is less we return without doing anything # [type: real, advanced: FALSE, range: [0,100], default: 5] heuristics/dualval/mingap = 5 # value added to objective of slack variables, must not be zero # [type: real, advanced: FALSE, range: [0.1,1e+20], default: 1] heuristics/dualval/lambdaslack = 1 # scaling factor for the objective function # [type: real, advanced: FALSE, range: [0,1], default: 0] heuristics/dualval/lambdaobj = 0 # priority of heuristic <farkasdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -900000] heuristics/farkasdiving/priority = -900000 # frequency for calling primal heuristic <farkasdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/farkasdiving/freq = 10 # frequency offset for calling primal heuristic <farkasdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/farkasdiving/freqofs = 0 # maximal depth level to call primal heuristic <farkasdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/farkasdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/farkasdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] 
heuristics/farkasdiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/farkasdiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/farkasdiving/maxlpiterofs = 1000 # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/farkasdiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/farkasdiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/farkasdiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/farkasdiving/maxdiveavgquotnosol = 0 # use one level of backtracking if infeasibility is encountered? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/farkasdiving/backtrack = TRUE # percentage of immediate domain changes during probing to trigger LP resolve # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.15] heuristics/farkasdiving/lpresolvedomchgquot = 0.15 # LP solve frequency for diving heuristics (0: only after enough domain changes have been found) # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] heuristics/farkasdiving/lpsolvefreq = 1 # should only LP branching candidates be considered instead of the slower but more general constraint handler diving variable selection? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/farkasdiving/onlylpbranchcands = FALSE # should diving candidates be checked before running? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/farkasdiving/checkcands = FALSE # should the score be scaled? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/farkasdiving/scalescore = TRUE # should the heuristic only run within the tree if at least one solution was found at the root node? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/farkasdiving/rootsuccess = TRUE # maximal occurrence factor of an objective coefficient # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/farkasdiving/maxobjocc = 1 # minimal objective dynamism (log) to run # [type: real, advanced: TRUE, range: [0,1e+20], default: 0.0001] heuristics/farkasdiving/objdynamism = 0.0001 # scale score by [f]ractionality or [i]mpact on farkasproof # [type: char, advanced: TRUE, range: {fi}, default: i] heuristics/farkasdiving/scaletype = i # priority of heuristic <feaspump> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1000000] heuristics/feaspump/priority = -1000000 # frequency for calling primal heuristic <feaspump> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 20] heuristics/feaspump/freq = 20 # frequency offset for calling primal heuristic <feaspump> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/feaspump/freqofs = 0 # maximal depth level to call primal heuristic <feaspump> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/feaspump/maxdepth = -1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.01] heuristics/feaspump/maxlpiterquot = 0.01 # factor by which the regard of the objective is 
decreased in each round, 1.0 for dynamic # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/feaspump/objfactor = 0.1 # initial weight of the objective function in the convex combination # [type: real, advanced: FALSE, range: [0,1], default: 1] heuristics/feaspump/alpha = 1 # threshold difference for the convex parameter to perform perturbation # [type: real, advanced: FALSE, range: [0,1], default: 1] heuristics/feaspump/alphadiff = 1 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/feaspump/maxlpiterofs = 1000 # total number of feasible solutions found up to which heuristic is called (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10] heuristics/feaspump/maxsols = 10 # maximal number of pumping loops (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10000] heuristics/feaspump/maxloops = 10000 # maximal number of pumping rounds without fractionality improvement (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10] heuristics/feaspump/maxstallloops = 10 # minimum number of random variables to flip, if a 1-cycle is encountered # [type: int, advanced: TRUE, range: [1,2147483647], default: 10] heuristics/feaspump/minflips = 10 # maximum length of cycles to be checked explicitly in each round # [type: int, advanced: TRUE, range: [1,100], default: 3] heuristics/feaspump/cyclelength = 3 # number of iterations until a random perturbation is forced # [type: int, advanced: TRUE, range: [1,2147483647], default: 100] heuristics/feaspump/perturbfreq = 100 # radius (using Manhattan metric) of the neighborhood to be searched in stage 3 # [type: int, advanced: FALSE, range: [1,2147483647], default: 18] heuristics/feaspump/neighborhoodsize = 18 # should the feasibility pump be called at root node before cut separation? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/feaspump/beforecuts = TRUE # should an iterative round-and-propagate scheme be used to find the integral points? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/feaspump/usefp20 = FALSE # should a random perturbation be performed if a feasible solution was found? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/feaspump/pertsolfound = TRUE # should we solve a local branching sub-MIP if no solution could be found? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/feaspump/stage3 = FALSE # should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/feaspump/copycuts = TRUE # priority of heuristic <fixandinfer> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -500000] heuristics/fixandinfer/priority = -500000 # frequency for calling primal heuristic <fixandinfer> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/fixandinfer/freq = -1 # frequency offset for calling primal heuristic <fixandinfer> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/fixandinfer/freqofs = 0 # maximal depth level to call primal heuristic <fixandinfer> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/fixandinfer/maxdepth = -1 # maximal number of propagation rounds in probing subproblems (-1: no limit, 0: auto) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 0] heuristics/fixandinfer/proprounds = 0 # minimal number of fixings to apply before dive may be aborted # [type: int, advanced: TRUE, range: [0,2147483647], default: 100] heuristics/fixandinfer/minfixings = 100 # priority of heuristic <fracdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 
-1003000] heuristics/fracdiving/priority = -1003000 # frequency for calling primal heuristic <fracdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/fracdiving/freq = 10 # frequency offset for calling primal heuristic <fracdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 3] heuristics/fracdiving/freqofs = 3 # maximal depth level to call primal heuristic <fracdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/fracdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/fracdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/fracdiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/fracdiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/fracdiving/maxlpiterofs = 1000 # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/fracdiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/fracdiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/fracdiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] 
heuristics/fracdiving/maxdiveavgquotnosol = 0 # use one level of backtracking if infeasibility is encountered? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/fracdiving/backtrack = TRUE # percentage of immediate domain changes during probing to trigger LP resolve # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.15] heuristics/fracdiving/lpresolvedomchgquot = 0.15 # LP solve frequency for diving heuristics (0: only after enough domain changes have been found) # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] heuristics/fracdiving/lpsolvefreq = 0 # should only LP branching candidates be considered instead of the slower but more general constraint handler diving variable selection? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/fracdiving/onlylpbranchcands = FALSE # priority of heuristic <gins> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1103000] heuristics/gins/priority = -1103000 # frequency for calling primal heuristic <gins> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 20] heuristics/gins/freq = 20 # frequency offset for calling primal heuristic <gins> # [type: int, advanced: FALSE, range: [0,65534], default: 8] heuristics/gins/freqofs = 8 # maximal depth level to call primal heuristic <gins> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gins/maxdepth = -1 # number of nodes added to the contingent of the total nodes # [type: int, advanced: FALSE, range: [0,2147483647], default: 500] heuristics/gins/nodesofs = 500 # maximum number of nodes to regard in the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 5000] heuristics/gins/maxnodes = 5000 # minimum number of nodes required to start the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 50] heuristics/gins/minnodes = 50 # number of nodes 
without incumbent change that heuristic should wait # [type: int, advanced: TRUE, range: [0,2147483647], default: 100] heuristics/gins/nwaitingnodes = 100 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.15] heuristics/gins/nodesquot = 0.15 # percentage of integer variables that have to be fixed # [type: real, advanced: FALSE, range: [1e-06,0.999999], default: 0.66] heuristics/gins/minfixingrate = 0.66 # factor by which gins should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/gins/minimprove = 0.01 # should subproblem be created out of the rows in the LP rows? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gins/uselprows = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/gins/copycuts = TRUE # should continuous variables outside the neighborhoods be fixed? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gins/fixcontvars = FALSE # limit on number of improving incumbent solutions in sub-CIP # [type: int, advanced: FALSE, range: [-1,2147483647], default: 3] heuristics/gins/bestsollimit = 3 # maximum distance to selected variable to enter the subproblem, or -1 to select the distance that best approximates the minimum fixing rate from below # [type: int, advanced: FALSE, range: [-1,2147483647], default: 3] heuristics/gins/maxdistance = 3 # the reference point to compute the neighborhood potential: (r)oot, (l)ocal lp, or (p)seudo solution # [type: char, advanced: TRUE, range: {lpr}, default: r] heuristics/gins/potential = r # should the heuristic solve a sequence of sub-MIP's around the first selected variable # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/gins/userollinghorizon = TRUE # should dense constraints (at least as dense as 1 - minfixingrate) be ignored by connectivity graph? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gins/relaxdenseconss = FALSE # limiting percentage for variables already used in sub-SCIPs to terminate rolling horizon approach # [type: real, advanced: TRUE, range: [0,1], default: 0.4] heuristics/gins/rollhorizonlimfac = 0.4 # overlap of blocks between runs - 0.0: no overlap, 1.0: shift by only 1 block # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/gins/overlap = 0 # should user decompositions be considered, if available? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/gins/usedecomp = TRUE # should user decompositions be considered for initial selection in rolling horizon, if available? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gins/usedecomprollhorizon = FALSE # should random initial variable selection be used if decomposition was not successful? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/gins/useselfallback = TRUE # should blocks be treated consecutively (sorted by ascending label?) # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/gins/consecutiveblocks = TRUE # priority of heuristic <guideddiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1007000] heuristics/guideddiving/priority = -1007000 # frequency for calling primal heuristic <guideddiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/guideddiving/freq = 10 # frequency offset for calling primal heuristic <guideddiving> # [type: int, advanced: FALSE, range: [0,65534], default: 7] heuristics/guideddiving/freqofs = 7 # maximal depth level to call primal heuristic <guideddiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/guideddiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/guideddiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/guideddiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/guideddiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/guideddiving/maxlpiterofs = 1000 # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/guideddiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: 
[0,1.79769313486232e+308], default: 0] heuristics/guideddiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/guideddiving/maxdiveubquotnosol = 1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 1] heuristics/guideddiving/maxdiveavgquotnosol = 1 # use one level of backtracking if infeasibility is encountered? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/guideddiving/backtrack = TRUE # percentage of immediate domain changes during probing to trigger LP resolve # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.15] heuristics/guideddiving/lpresolvedomchgquot = 0.15 # LP solve frequency for diving heuristics (0: only after enough domain changes have been found) # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] heuristics/guideddiving/lpsolvefreq = 0 # should only LP branching candidates be considered instead of the slower but more general constraint handler diving variable selection? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/guideddiving/onlylpbranchcands = FALSE # priority of heuristic <indicator> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -20200] heuristics/indicator/priority = -20200 # frequency for calling primal heuristic <indicator> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] heuristics/indicator/freq = 1 # frequency offset for calling primal heuristic <indicator> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/indicator/freqofs = 0 # maximal depth level to call primal heuristic <indicator> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/indicator/maxdepth = -1 # whether the one-opt heuristic should be started # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/indicator/oneopt = FALSE # Try to improve other solutions by one-opt? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/indicator/improvesols = FALSE # priority of heuristic <intdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1003500] heuristics/intdiving/priority = -1003500 # frequency for calling primal heuristic <intdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/intdiving/freq = -1 # frequency offset for calling primal heuristic <intdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 9] heuristics/intdiving/freqofs = 9 # maximal depth level to call primal heuristic <intdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/intdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/intdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] 
heuristics/intdiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/intdiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/intdiving/maxlpiterofs = 1000 # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/intdiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/intdiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/intdiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/intdiving/maxdiveavgquotnosol = 0 # use one level of backtracking if infeasibility is encountered? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/intdiving/backtrack = TRUE # priority of heuristic <intshifting> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -10000] heuristics/intshifting/priority = -10000 # frequency for calling primal heuristic <intshifting> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/intshifting/freq = 10 # frequency offset for calling primal heuristic <intshifting> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/intshifting/freqofs = 0 # maximal depth level to call primal heuristic <intshifting> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/intshifting/maxdepth = -1 # priority of heuristic <linesearchdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1006000] heuristics/linesearchdiving/priority = -1006000 # frequency for calling primal heuristic <linesearchdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/linesearchdiving/freq = 10 # frequency offset for calling primal heuristic <linesearchdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 6] heuristics/linesearchdiving/freqofs = 6 # maximal depth level to call primal heuristic <linesearchdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/linesearchdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/linesearchdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/linesearchdiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/linesearchdiving/maxlpiterquot = 0.05 # 
additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/linesearchdiving/maxlpiterofs = 1000 # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/linesearchdiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/linesearchdiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/linesearchdiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/linesearchdiving/maxdiveavgquotnosol = 0 # use one level of backtracking if infeasibility is encountered? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/linesearchdiving/backtrack = TRUE # percentage of immediate domain changes during probing to trigger LP resolve # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.15] heuristics/linesearchdiving/lpresolvedomchgquot = 0.15 # LP solve frequency for diving heuristics (0: only after enough domain changes have been found) # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] heuristics/linesearchdiving/lpsolvefreq = 0 # should only LP branching candidates be considered instead of the slower but more general constraint handler diving variable selection? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/linesearchdiving/onlylpbranchcands = FALSE # priority of heuristic <localbranching> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1102000] heuristics/localbranching/priority = -1102000 # frequency for calling primal heuristic <localbranching> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/localbranching/freq = -1 # frequency offset for calling primal heuristic <localbranching> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/localbranching/freqofs = 0 # maximal depth level to call primal heuristic <localbranching> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/localbranching/maxdepth = -1 # number of nodes added to the contingent of the total nodes # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/localbranching/nodesofs = 1000 # radius (using Manhattan metric) of the incumbent's neighborhood to be searched # [type: int, advanced: FALSE, range: [1,2147483647], default: 18] heuristics/localbranching/neighborhoodsize = 18 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.05] heuristics/localbranching/nodesquot = 0.05 # factor by which the limit on the number of LP depends on the node limit # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 1.5] heuristics/localbranching/lplimfac = 1.5 # minimum number of nodes required to start the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 1000] heuristics/localbranching/minnodes = 1000 # maximum number of nodes to regard in the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 10000] heuristics/localbranching/maxnodes = 10000 # number of nodes without incumbent change that heuristic should wait # 
[type: int, advanced: TRUE, range: [0,2147483647], default: 200] heuristics/localbranching/nwaitingnodes = 200 # factor by which localbranching should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/localbranching/minimprove = 0.01 # should subproblem be created out of the rows in the LP rows? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/localbranching/uselprows = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/localbranching/copycuts = TRUE # limit on number of improving incumbent solutions in sub-CIP # [type: int, advanced: FALSE, range: [-1,2147483647], default: 3] heuristics/localbranching/bestsollimit = 3 # priority of heuristic <locks> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 3000] heuristics/locks/priority = 3000 # frequency for calling primal heuristic <locks> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/locks/freq = 0 # frequency offset for calling primal heuristic <locks> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/locks/freqofs = 0 # maximal depth level to call primal heuristic <locks> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/locks/maxdepth = -1 # maximum number of propagation rounds to be performed in each propagation call (-1: no limit, -2: parameter settings) # [type: int, advanced: TRUE, range: [-2,2147483647], default: 2] heuristics/locks/maxproprounds = 2 # minimum percentage of integer variables that have to be fixable # [type: real, advanced: FALSE, range: [0,1], default: 0.65] heuristics/locks/minfixingrate = 0.65 # probability for rounding a variable up in case of ties # [type: real, advanced: FALSE, range: [0,1], default: 0.67] 
heuristics/locks/roundupprobability = 0.67 # should a final sub-MIP be solved to construct a feasible solution if the LP was not roundable? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/locks/usefinalsubmip = TRUE # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 5000] heuristics/locks/maxnodes = 5000 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 500] heuristics/locks/nodesofs = 500 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 500] heuristics/locks/minnodes = 500 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/locks/nodesquot = 0.1 # factor by which locks heuristic should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/locks/minimprove = 0.01 # should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/locks/copycuts = TRUE # should the locks be updated based on LP rows? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/locks/updatelocks = TRUE # minimum fixing rate over all variables (including continuous) to solve LP # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/locks/minfixingratelp = 0 # priority of heuristic <lpface> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1104000] heuristics/lpface/priority = -1104000 # frequency for calling primal heuristic <lpface> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 15] heuristics/lpface/freq = 15 # frequency offset for calling primal heuristic <lpface> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/lpface/freqofs = 0 # maximal depth level to call primal heuristic <lpface> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/lpface/maxdepth = -1 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 200] heuristics/lpface/nodesofs = 200 # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 5000] heuristics/lpface/maxnodes = 5000 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 50] heuristics/lpface/minnodes = 50 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/lpface/nodesquot = 0.1 # required percentage of fixed integer variables in sub-MIP to run # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/lpface/minfixingrate = 0.1 # factor by which the limit on the number of LP depends on the node limit # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 2] heuristics/lpface/lplimfac = 2 # should subproblem be created out 
of the rows in the LP rows? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/lpface/uselprows = TRUE # should dually nonbasic rows be turned into equations? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/lpface/dualbasisequations = FALSE # should the heuristic continue solving the same sub-SCIP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/lpface/keepsubscip = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/lpface/copycuts = TRUE # objective function in the sub-SCIP: (z)ero, (r)oot-LP-difference, (i)nference, LP (f)ractionality, (o)riginal # [type: char, advanced: TRUE, range: {forzi}, default: z] heuristics/lpface/subscipobjective = z # the minimum active search tree path length along which lower bound hasn't changed before heuristic becomes active # [type: int, advanced: TRUE, range: [0,65531], default: 5] heuristics/lpface/minpathlen = 5 # priority of heuristic <alns> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1100500] heuristics/alns/priority = -1100500 # frequency for calling primal heuristic <alns> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 20] heuristics/alns/freq = 20 # frequency offset for calling primal heuristic <alns> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/alns/freqofs = 0 # maximal depth level to call primal heuristic <alns> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/alns/maxdepth = -1 # minimum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.3] heuristics/alns/rens/minfixingrate = 0.3 # maximum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.9] heuristics/alns/rens/maxfixingrate = 0.9 
# is this neighborhood active? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/rens/active = TRUE # positive call priority to initialize bandit algorithms # [type: real, advanced: TRUE, range: [0.01,1], default: 1] heuristics/alns/rens/priority = 1 # minimum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.3] heuristics/alns/rins/minfixingrate = 0.3 # maximum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.9] heuristics/alns/rins/maxfixingrate = 0.9 # is this neighborhood active? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/rins/active = TRUE # positive call priority to initialize bandit algorithms # [type: real, advanced: TRUE, range: [0.01,1], default: 1] heuristics/alns/rins/priority = 1 # minimum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.3] heuristics/alns/mutation/minfixingrate = 0.3 # maximum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.9] heuristics/alns/mutation/maxfixingrate = 0.9 # is this neighborhood active? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/mutation/active = TRUE # positive call priority to initialize bandit algorithms # [type: real, advanced: TRUE, range: [0.01,1], default: 1] heuristics/alns/mutation/priority = 1 # minimum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.3] heuristics/alns/localbranching/minfixingrate = 0.3 # maximum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.9] heuristics/alns/localbranching/maxfixingrate = 0.9 # is this neighborhood active? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/localbranching/active = TRUE # positive call priority to initialize bandit algorithms # [type: real, advanced: TRUE, range: [0.01,1], default: 1] heuristics/alns/localbranching/priority = 1 # minimum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.3] heuristics/alns/crossover/minfixingrate = 0.3 # maximum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.9] heuristics/alns/crossover/maxfixingrate = 0.9 # is this neighborhood active? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/crossover/active = TRUE # positive call priority to initialize bandit algorithms # [type: real, advanced: TRUE, range: [0.01,1], default: 1] heuristics/alns/crossover/priority = 1 # the number of solutions that crossover should combine # [type: int, advanced: TRUE, range: [2,10], default: 2] heuristics/alns/crossover/nsols = 2 # minimum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.3] heuristics/alns/proximity/minfixingrate = 0.3 # maximum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.9] heuristics/alns/proximity/maxfixingrate = 0.9 # is this neighborhood active? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/proximity/active = TRUE # positive call priority to initialize bandit algorithms # [type: real, advanced: TRUE, range: [0.01,1], default: 1] heuristics/alns/proximity/priority = 1 # minimum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.3] heuristics/alns/zeroobjective/minfixingrate = 0.3 # maximum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.9] heuristics/alns/zeroobjective/maxfixingrate = 0.9 # is this neighborhood active? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/zeroobjective/active = TRUE # positive call priority to initialize bandit algorithms # [type: real, advanced: TRUE, range: [0.01,1], default: 1] heuristics/alns/zeroobjective/priority = 1 # minimum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.3] heuristics/alns/dins/minfixingrate = 0.3 # maximum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.9] heuristics/alns/dins/maxfixingrate = 0.9 # is this neighborhood active? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/dins/active = TRUE # positive call priority to initialize bandit algorithms # [type: real, advanced: TRUE, range: [0.01,1], default: 1] heuristics/alns/dins/priority = 1 # number of pool solutions where binary solution values must agree # [type: int, advanced: TRUE, range: [1,100], default: 5] heuristics/alns/dins/npoolsols = 5 # minimum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.3] heuristics/alns/trustregion/minfixingrate = 0.3 # maximum fixing rate for this neighborhood # [type: real, advanced: TRUE, range: [0,1], default: 0.9] heuristics/alns/trustregion/maxfixingrate = 0.9 # is this neighborhood active? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/alns/trustregion/active = FALSE # positive call priority to initialize bandit algorithms # [type: real, advanced: TRUE, range: [0.01,1], default: 1] heuristics/alns/trustregion/priority = 1 # the penalty for each change in the binary variables from the candidate solution # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 100] heuristics/alns/trustregion/violpenalty = 100 # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 5000] heuristics/alns/maxnodes = 5000 # offset added to the nodes budget # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 500] heuristics/alns/nodesofs = 500 # minimum number of nodes required to start a sub-SCIP # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 50] heuristics/alns/minnodes = 50 # number of nodes since last incumbent solution that the heuristic should wait # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 25] heuristics/alns/waitingnodes = 25 # fraction of nodes compared to the main SCIP for budget computation # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/alns/nodesquot = 0.1 # lower bound fraction of nodes compared to the main SCIP for budget computation # [type: real, advanced: FALSE, range: [0,1], default: 0] heuristics/alns/nodesquotmin = 0 # initial factor by which ALNS should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/alns/startminimprove = 0.01 # lower threshold for the minimal improvement over the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/alns/minimprovelow = 0.01 # upper bound for the minimal improvement over the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/alns/minimprovehigh = 0.01 # limit on the number of 
improving solutions in a sub-SCIP call # [type: int, advanced: FALSE, range: [-1,2147483647], default: 3] heuristics/alns/nsolslim = 3 # the bandit algorithm: (u)pper confidence bounds, (e)xp.3, epsilon (g)reedy # [type: char, advanced: TRUE, range: {ueg}, default: u] heuristics/alns/banditalgo = u # weight between uniform (gamma ~ 1) and weight driven (gamma ~ 0) probability distribution for exp3 # [type: real, advanced: TRUE, range: [0,1], default: 0.07041455] heuristics/alns/gamma = 0.07041455 # reward offset between 0 and 1 at every observation for Exp.3 # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/alns/beta = 0 # parameter to increase the confidence width in UCB # [type: real, advanced: TRUE, range: [0,100], default: 0.0016] heuristics/alns/alpha = 0.0016 # distances from fixed variables be used for variable prioritization # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/usedistances = TRUE # should reduced cost scores be used for variable prioritization? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/useredcost = TRUE # should the ALNS heuristic do more fixings by itself based on variable prioritization until the target fixing rate is reached? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/domorefixings = TRUE # should the heuristic adjust the target fixing rate based on the success? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/adjustfixingrate = TRUE # should the heuristic activate other sub-SCIP heuristics during its search? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/alns/usesubscipheurs = FALSE # reward control to increase the weight of the simple solution indicator and decrease the weight of the closed gap reward # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/alns/rewardcontrol = 0.8 # factor by which target node number is eventually increased # [type: real, advanced: TRUE, range: [1,100000], default: 1.05] heuristics/alns/targetnodefactor = 1.05 # initial random seed for bandit algorithms and random decisions by neighborhoods # [type: int, advanced: FALSE, range: [0,2147483647], default: 113] heuristics/alns/seed = 113 # number of allowed executions of the heuristic on the same incumbent solution (-1: no limit, 0: number of active neighborhoods) # [type: int, advanced: TRUE, range: [-1,100], default: -1] heuristics/alns/maxcallssamesol = -1 # should the factor by which the minimum improvement is bound be dynamically updated? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/alns/adjustminimprove = FALSE # should the target nodes be dynamically adjusted? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/adjusttargetnodes = TRUE # increase exploration in epsilon-greedy bandit algorithm # [type: real, advanced: TRUE, range: [0,1], default: 0.4685844] heuristics/alns/eps = 0.4685844 # the reward baseline to separate successful and failed calls # [type: real, advanced: TRUE, range: [0,0.99], default: 0.5] heuristics/alns/rewardbaseline = 0.5 # should the bandit algorithms be reset when a new problem is read? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/resetweights = TRUE # file name to store all rewards and the selection of the bandit # [type: string, advanced: TRUE, default: "-"] heuristics/alns/rewardfilename = "-" # should random seeds of sub-SCIPs be altered to increase diversification? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/alns/subsciprandseeds = FALSE # should the reward be scaled by the effort? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/scalebyeffort = TRUE # should cutting planes be copied to the sub-SCIP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/alns/copycuts = FALSE # tolerance by which the fixing rate may be missed without generic fixing # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/alns/fixtol = 0.1 # tolerance by which the fixing rate may be exceeded without generic unfixing # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/alns/unfixtol = 0.1 # should local reduced costs be used for generic (un)fixing? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/alns/uselocalredcost = FALSE # should pseudo cost scores be used for variable prioritization? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/alns/usepscost = TRUE # should the heuristic be executed multiple times during the root node? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/alns/initduringroot = FALSE # is statistics table <neighborhood> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/neighborhood/active = TRUE # priority of heuristic <multistart> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -2100000] heuristics/multistart/priority = -2100000 # frequency for calling primal heuristic <multistart> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/multistart/freq = 0 # frequency offset for calling primal heuristic <multistart> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/multistart/freqofs = 0 # maximal depth level to call primal heuristic <multistart> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/multistart/maxdepth = -1 # number of random points generated per execution call # [type: int, advanced: FALSE, range: [0,2147483647], default: 100] heuristics/multistart/nrndpoints = 100 # maximum variable domain size for unbounded variables # [type: real, advanced: FALSE, range: [0,1e+20], default: 20000] heuristics/multistart/maxboundsize = 20000 # number of iterations to reduce the maximum violation of a point # [type: int, advanced: FALSE, range: [0,2147483647], default: 300] heuristics/multistart/maxiter = 300 # minimum required improving factor to proceed in improvement of a single point # [type: real, advanced: FALSE, range: [-1e+20,1e+20], default: 0.05] heuristics/multistart/minimprfac = 0.05 # number of iteration when checking the minimum improvement # [type: int, advanced: FALSE, range: [1,2147483647], default: 10] heuristics/multistart/minimpriter = 10 # maximum distance between two points in the same cluster # [type: real, advanced: FALSE, range: [0,1e+20], default: 0.15] heuristics/multistart/maxreldist = 0.15 # limit for gradient computations for all improvePoint() 
calls (0 for no limit) # [type: real, advanced: FALSE, range: [0,1e+20], default: 5000000] heuristics/multistart/gradlimit = 5000000 # maximum number of considered clusters per heuristic call # [type: int, advanced: FALSE, range: [0,2147483647], default: 3] heuristics/multistart/maxncluster = 3 # should the heuristic run only on continuous problems? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/multistart/onlynlps = TRUE # priority of heuristic <mpec> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -2050000] heuristics/mpec/priority = -2050000 # frequency for calling primal heuristic <mpec> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 50] heuristics/mpec/freq = 50 # frequency offset for calling primal heuristic <mpec> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/mpec/freqofs = 0 # maximal depth level to call primal heuristic <mpec> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/mpec/maxdepth = -1 # initial regularization right-hand side value # [type: real, advanced: FALSE, range: [0,0.25], default: 0.125] heuristics/mpec/inittheta = 0.125 # regularization update factor # [type: real, advanced: FALSE, range: [0,1], default: 0.5] heuristics/mpec/sigma = 0.5 # maximum number of NLP iterations per solve # [type: real, advanced: FALSE, range: [0,1], default: 0.001] heuristics/mpec/subnlptrigger = 0.001 # maximum cost available for solving NLPs per call of the heuristic # [type: real, advanced: FALSE, range: [0,1e+20], default: 100000000] heuristics/mpec/maxnlpcost = 100000000 # factor by which heuristic should at least improve the incumbent # [type: real, advanced: FALSE, range: [0,1], default: 0.01] heuristics/mpec/minimprove = 0.01 # minimum amount of gap left in order to call the heuristic # [type: real, advanced: FALSE, range: [0,1e+20], default: 0.05] heuristics/mpec/mingapleft = 0.05 # 
maximum number of iterations of the MPEC loop # [type: int, advanced: FALSE, range: [0,2147483647], default: 100] heuristics/mpec/maxiter = 100 # maximum number of NLP iterations per solve # [type: int, advanced: FALSE, range: [0,2147483647], default: 500] heuristics/mpec/maxnlpiter = 500 # maximum number of consecutive calls for which the heuristic did not find an improving solution # [type: int, advanced: FALSE, range: [0,2147483647], default: 10] heuristics/mpec/maxnunsucc = 10 # priority of heuristic <mutation> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1103000] heuristics/mutation/priority = -1103000 # frequency for calling primal heuristic <mutation> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/mutation/freq = -1 # frequency offset for calling primal heuristic <mutation> # [type: int, advanced: FALSE, range: [0,65534], default: 8] heuristics/mutation/freqofs = 8 # maximal depth level to call primal heuristic <mutation> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/mutation/maxdepth = -1 # number of nodes added to the contingent of the total nodes # [type: int, advanced: FALSE, range: [0,2147483647], default: 500] heuristics/mutation/nodesofs = 500 # maximum number of nodes to regard in the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 5000] heuristics/mutation/maxnodes = 5000 # minimum number of nodes required to start the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 500] heuristics/mutation/minnodes = 500 # number of nodes without incumbent change that heuristic should wait # [type: int, advanced: TRUE, range: [0,2147483647], default: 200] heuristics/mutation/nwaitingnodes = 200 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/mutation/nodesquot = 0.1 # 
percentage of integer variables that have to be fixed # [type: real, advanced: FALSE, range: [1e-06,0.999999], default: 0.8] heuristics/mutation/minfixingrate = 0.8 # factor by which mutation should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/mutation/minimprove = 0.01 # should subproblem be created out of the rows in the LP rows? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/mutation/uselprows = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/mutation/copycuts = TRUE # limit on number of improving incumbent solutions in sub-CIP # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] heuristics/mutation/bestsollimit = -1 # should uct node selection be used at the beginning of the search? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/mutation/useuct = FALSE # priority of heuristic <nlpdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1003000] heuristics/nlpdiving/priority = -1003000 # frequency for calling primal heuristic <nlpdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/nlpdiving/freq = 10 # frequency offset for calling primal heuristic <nlpdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 3] heuristics/nlpdiving/freqofs = 3 # maximal depth level to call primal heuristic <nlpdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/nlpdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/nlpdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/nlpdiving/maxreldepth = 1 # minimal absolute 
number of allowed NLP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 200] heuristics/nlpdiving/maxnlpiterabs = 200 # additional allowed number of NLP iterations relative to successfully found solutions # [type: int, advanced: FALSE, range: [0,2147483647], default: 10] heuristics/nlpdiving/maxnlpiterrel = 10 # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/nlpdiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/nlpdiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/nlpdiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/nlpdiving/maxdiveavgquotnosol = 0 # maximal number of NLPs with feasible solution to solve during one dive # [type: int, advanced: FALSE, range: [1,2147483647], default: 10] heuristics/nlpdiving/maxfeasnlps = 10 # use one level of backtracking if infeasibility is encountered? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/nlpdiving/backtrack = TRUE # should the LP relaxation be solved before the NLP relaxation? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/nlpdiving/lp = FALSE # prefer variables that are also fractional in LP solution? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/nlpdiving/preferlpfracs = FALSE # heuristic will not run if less than this percentage of calls succeeded (0.0: no limit) # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/nlpdiving/minsuccquot = 0.1 # percentage of fractional variables that should be fixed before the next NLP solve # [type: real, advanced: FALSE, range: [0,1], default: 0.2] heuristics/nlpdiving/fixquot = 0.2 # should variables in a minimal cover be preferred? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/nlpdiving/prefercover = TRUE # should a sub-MIP be solved if all cover variables are fixed? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/nlpdiving/solvesubmip = FALSE # should the NLP solver stop early if it converges slowly? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/nlpdiving/nlpfastfail = TRUE # which point should be used as starting point for the NLP solver? ('n'one, last 'f'easible, from dive's'tart) # [type: char, advanced: TRUE, range: {fns}, default: s] heuristics/nlpdiving/nlpstart = s # which variable selection should be used? 
('f'ractionality, 'c'oefficient, 'p'seudocost, 'g'uided, 'd'ouble, 'v'eclen) # [type: char, advanced: FALSE, range: {fcpgdv}, default: d] heuristics/nlpdiving/varselrule = d # priority of heuristic <objpscostdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1004000] heuristics/objpscostdiving/priority = -1004000 # frequency for calling primal heuristic <objpscostdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 20] heuristics/objpscostdiving/freq = 20 # frequency offset for calling primal heuristic <objpscostdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 4] heuristics/objpscostdiving/freqofs = 4 # maximal depth level to call primal heuristic <objpscostdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/objpscostdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/objpscostdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/objpscostdiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to total iteration number # [type: real, advanced: FALSE, range: [0,1], default: 0.01] heuristics/objpscostdiving/maxlpiterquot = 0.01 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/objpscostdiving/maxlpiterofs = 1000 # total number of feasible solutions found up to which heuristic is called (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] heuristics/objpscostdiving/maxsols = -1 # maximal diving depth: number of binary/integer variables times depthfac # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0.5] heuristics/objpscostdiving/depthfac = 0.5 # maximal diving depth factor if no feasible solution was found yet # [type: real, 
advanced: TRUE, range: [0,1.79769313486232e+308], default: 2] heuristics/objpscostdiving/depthfacnosol = 2 # priority of heuristic <octane> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1008000] heuristics/octane/priority = -1008000 # frequency for calling primal heuristic <octane> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/octane/freq = -1 # frequency offset for calling primal heuristic <octane> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/octane/freqofs = 0 # maximal depth level to call primal heuristic <octane> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/octane/maxdepth = -1 # number of 0-1-points to be tested as possible solutions by OCTANE # [type: int, advanced: TRUE, range: [1,2147483647], default: 100] heuristics/octane/fmax = 100 # number of 0-1-points to be tested at first whether they violate a common row # [type: int, advanced: TRUE, range: [1,2147483647], default: 10] heuristics/octane/ffirst = 10 # execute OCTANE only in the space of fractional variables (TRUE) or in the full space? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/octane/usefracspace = TRUE # should the inner normal of the objective be used as one ray direction? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/octane/useobjray = TRUE # should the average of the basic cone be used as one ray direction? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/octane/useavgray = TRUE # should the difference between the root solution and the current LP solution be used as one ray direction? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/octane/usediffray = FALSE # should the weighted average of the basic cone be used as one ray direction? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/octane/useavgwgtray = TRUE # should the weighted average of the nonbasic cone be used as one ray direction? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/octane/useavgnbray = TRUE # priority of heuristic <ofins> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 60000] heuristics/ofins/priority = 60000 # frequency for calling primal heuristic <ofins> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/ofins/freq = 0 # frequency offset for calling primal heuristic <ofins> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/ofins/freqofs = 0 # maximal depth level to call primal heuristic <ofins> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: 0] heuristics/ofins/maxdepth = 0 # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 5000] heuristics/ofins/maxnodes = 5000 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 50] heuristics/ofins/minnodes = 50 # maximal rate of changed coefficients # [type: real, advanced: FALSE, range: [0,1], default: 0.5] heuristics/ofins/maxchangerate = 0.5 # maximal rate of change per coefficient to get fixed # [type: real, advanced: FALSE, range: [0,1], default: 0.04] heuristics/ofins/maxchange = 0.04 # should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/ofins/copycuts = TRUE # should all subproblem solutions be added to the original SCIP? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/ofins/addallsols = FALSE # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 500] heuristics/ofins/nodesofs = 500 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/ofins/nodesquot = 0.1 # factor by which RENS should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/ofins/minimprove = 0.01 # factor by which the limit on the number of LP depends on the node limit # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 2] heuristics/ofins/lplimfac = 2 # priority of heuristic <oneopt> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -20000] heuristics/oneopt/priority = -20000 # frequency for calling primal heuristic <oneopt> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] heuristics/oneopt/freq = 1 # frequency offset for calling primal heuristic <oneopt> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/oneopt/freqofs = 0 # maximal depth level to call primal heuristic <oneopt> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/oneopt/maxdepth = -1 # should the objective be weighted with the potential shifting value when sorting the shifting candidates? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/oneopt/weightedobj = TRUE # should the heuristic be called before and during the root node? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/oneopt/duringroot = TRUE # should the construction of the LP be forced even if LP solving is deactivated? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/oneopt/forcelpconstruction = FALSE # should the heuristic be called before presolving? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/oneopt/beforepresol = FALSE # should the heuristic continue to run as long as improvements are found? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/oneopt/useloop = TRUE # priority of heuristic <padm> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 70000] heuristics/padm/priority = 70000 # frequency for calling primal heuristic <padm> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/padm/freq = 0 # frequency offset for calling primal heuristic <padm> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/padm/freqofs = 0 # maximal depth level to call primal heuristic <padm> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/padm/maxdepth = -1 # maximum number of nodes to regard in all subproblems # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 5000] heuristics/padm/maxnodes = 5000 # minimum number of nodes to regard in one subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 50] heuristics/padm/minnodes = 50 # factor to control nodelimits of subproblems # [type: real, advanced: TRUE, range: [0,0.99], default: 0.8] heuristics/padm/nodefac = 0.8 # maximal number of ADM iterations in each penalty loop # [type: int, advanced: TRUE, range: [1,100], default: 4] heuristics/padm/admiterations = 4 # maximal number of penalty iterations # [type: int, advanced: TRUE, range: [1,100000], default: 100] heuristics/padm/penaltyiterations = 100 # mipgap at start # [type: real, advanced: TRUE, range: [0,16], default: 2] heuristics/padm/gap = 2 # should the problem get reoptimized with the original objective function? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/padm/reoptimize = TRUE # enable sigmoid rescaling of penalty parameters # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/padm/scaling = TRUE # should linking constraints be assigned? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/padm/assignlinking = TRUE # should the original problem be used? This is only for testing and not recommended! # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/padm/original = FALSE # should the heuristic run before or after the processing of the node? (0: before, 1: after, 2: both) # [type: int, advanced: FALSE, range: [0,2], default: 0] heuristics/padm/timing = 0 # priority of heuristic <proximity> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -2000000] heuristics/proximity/priority = -2000000 # frequency for calling primal heuristic <proximity> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/proximity/freq = -1 # frequency offset for calling primal heuristic <proximity> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/proximity/freqofs = 0 # maximal depth level to call primal heuristic <proximity> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/proximity/maxdepth = -1 # should subproblem be constructed based on LP row information? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/proximity/uselprows = FALSE # should the heuristic immediately run again on its newly found solution? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/proximity/restart = TRUE # should the heuristic solve a final LP in case of continuous objective variables? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/proximity/usefinallp = FALSE # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 10000] heuristics/proximity/maxnodes = 10000 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 50] heuristics/proximity/nodesofs = 50 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 1] heuristics/proximity/minnodes = 1 # maximum number of LP iterations to be performed in the subproblem # [type: longint, advanced: TRUE, range: [-1,9223372036854775807], default: 100000] heuristics/proximity/maxlpiters = 100000 # minimum number of LP iterations performed in subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 200] heuristics/proximity/minlpiters = 200 # waiting nodes since last incumbent before heuristic is executed # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 100] heuristics/proximity/waitingnodes = 100 # factor by which proximity should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.02] heuristics/proximity/minimprove = 0.02 # sub-MIP node limit w.r.t number of original nodes # [type: real, advanced: TRUE, range: [0,1e+20], default: 0.1] heuristics/proximity/nodesquot = 0.1 # threshold for percentage of binary variables required to start # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/proximity/binvarquot = 0.1 # quotient of sub-MIP LP iterations with respect to LP iterations so far # [type: real, advanced: TRUE, range: [0,1], default: 0.2] heuristics/proximity/lpitersquot = 0.2 # minimum primal-dual gap for which the heuristic is executed # [type: real, advanced: TRUE, range: [0,1e+20], default: 0.01] heuristics/proximity/mingap = 0.01 # 
should uct node selection be used at the beginning of the search? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/proximity/useuct = FALSE # priority of heuristic <pscostdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1002000] heuristics/pscostdiving/priority = -1002000 # frequency for calling primal heuristic <pscostdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/pscostdiving/freq = 10 # frequency offset for calling primal heuristic <pscostdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 2] heuristics/pscostdiving/freqofs = 2 # maximal depth level to call primal heuristic <pscostdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/pscostdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/pscostdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/pscostdiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/pscostdiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/pscostdiving/maxlpiterofs = 1000 # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/pscostdiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/pscostdiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found 
yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/pscostdiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/pscostdiving/maxdiveavgquotnosol = 0 # use one level of backtracking if infeasibility is encountered? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/pscostdiving/backtrack = TRUE # percentage of immediate domain changes during probing to trigger LP resolve # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.15] heuristics/pscostdiving/lpresolvedomchgquot = 0.15 # LP solve frequency for diving heuristics (0: only after enough domain changes have been found) # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] heuristics/pscostdiving/lpsolvefreq = 0 # should only LP branching candidates be considered instead of the slower but more general constraint handler diving variable selection? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/pscostdiving/onlylpbranchcands = TRUE # priority of heuristic <randrounding> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -200] heuristics/randrounding/priority = -200 # frequency for calling primal heuristic <randrounding> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 20] heuristics/randrounding/freq = 20 # frequency offset for calling primal heuristic <randrounding> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/randrounding/freqofs = 0 # maximal depth level to call primal heuristic <randrounding> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/randrounding/maxdepth = -1 # should the heuristic only be called once per node? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/randrounding/oncepernode = FALSE # should the heuristic apply the variable lock strategy of simple rounding, if possible? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/randrounding/usesimplerounding = FALSE # should the probing part of the heuristic be applied exclusively at the root node? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/randrounding/propagateonlyroot = TRUE # limit of rounds for each propagation call # [type: int, advanced: TRUE, range: [-1,2147483647], default: 1] heuristics/randrounding/maxproprounds = 1 # priority of heuristic <rens> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1100000] heuristics/rens/priority = -1100000 # frequency for calling primal heuristic <rens> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/rens/freq = 0 # frequency offset for calling primal heuristic <rens> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/rens/freqofs = 0 # maximal depth level to call primal heuristic <rens> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/rens/maxdepth = -1 # minimum percentage of integer variables that have to be fixable # [type: real, advanced: FALSE, range: [0,1], default: 0.5] heuristics/rens/minfixingrate = 0.5 # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 5000] heuristics/rens/maxnodes = 5000 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 500] heuristics/rens/nodesofs = 500 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 50] heuristics/rens/minnodes = 50 # contingent of sub problem nodes 
in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/rens/nodesquot = 0.1 # factor by which RENS should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/rens/minimprove = 0.01 # factor by which the limit on the number of LP depends on the node limit # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 2] heuristics/rens/lplimfac = 2 # solution that is used for fixing values ('l'p relaxation, 'n'lp relaxation) # [type: char, advanced: FALSE, range: {nl}, default: l] heuristics/rens/startsol = l # should general integers get binary bounds [floor(.),ceil(.)] ? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/rens/binarybounds = TRUE # should subproblem be created out of the rows in the LP rows? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/rens/uselprows = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/rens/copycuts = TRUE # should the RENS sub-CIP get its own full time limit? This is only for testing and not recommended! # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/rens/extratime = FALSE # should all subproblem solutions be added to the original SCIP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/rens/addallsols = FALSE # should the RENS sub-CIP be solved with cuts, conflicts, strong branching,... This is only for testing and not recommended! 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/rens/fullscale = FALSE # limit on number of improving incumbent solutions in sub-CIP # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] heuristics/rens/bestsollimit = -1 # should uct node selection be used at the beginning of the search? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/rens/useuct = FALSE # priority of heuristic <reoptsols> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 40000] heuristics/reoptsols/priority = 40000 # frequency for calling primal heuristic <reoptsols> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/reoptsols/freq = 0 # frequency offset for calling primal heuristic <reoptsols> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/reoptsols/freqofs = 0 # maximal depth level to call primal heuristic <reoptsols> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: 0] heuristics/reoptsols/maxdepth = 0 # maximal number of solutions which should be checked. (-1: all) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 1000] heuristics/reoptsols/maxsols = 1000 # check solutions of the last k runs. 
(-1: all) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] heuristics/reoptsols/maxruns = -1 # priority of heuristic <repair> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 0] heuristics/repair/priority = 0 # frequency for calling primal heuristic <repair> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/repair/freq = -1 # frequency offset for calling primal heuristic <repair> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/repair/freqofs = 0 # maximal depth level to call primal heuristic <repair> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/repair/maxdepth = -1 # file name of a solution to be used as infeasible starting point, [-] if not available # [type: string, advanced: FALSE, default: "-"] heuristics/repair/filename = "-" # True : fractional variables which are not fractional in the given solution are rounded, FALSE : solving process of this heuristic is stopped. # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/repair/roundit = TRUE # should a scaled objective function for original variables be used in repair subproblem? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/repair/useobjfactor = FALSE # should variable fixings be used in repair subproblem? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/repair/usevarfix = TRUE # should slack variables be used in repair subproblem? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/repair/useslackvars = FALSE # factor for the potential of var fixings # [type: real, advanced: TRUE, range: [0,100], default: 2] heuristics/repair/alpha = 2 # number of nodes added to the contingent of the total nodes # [type: int, advanced: FALSE, range: [0,2147483647], default: 500] heuristics/repair/nodesofs = 500 # maximum number of nodes to regard in the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 5000] heuristics/repair/maxnodes = 5000 # minimum number of nodes required to start the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 50] heuristics/repair/minnodes = 50 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/repair/nodesquot = 0.1 # minimum percentage of integer variables that have to be fixed # [type: real, advanced: FALSE, range: [0,1], default: 0.3] heuristics/repair/minfixingrate = 0.3 # priority of heuristic <rins> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1101000] heuristics/rins/priority = -1101000 # frequency for calling primal heuristic <rins> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 25] heuristics/rins/freq = 25 # frequency offset for calling primal heuristic <rins> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/rins/freqofs = 0 # maximal depth level to call primal heuristic <rins> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/rins/maxdepth = -1 # number of nodes added to the contingent of the total nodes # [type: int, advanced: FALSE, range: [0,2147483647], default: 500] heuristics/rins/nodesofs = 500 # maximum number of nodes to regard in the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 5000] heuristics/rins/maxnodes = 
5000 # minimum number of nodes required to start the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 50] heuristics/rins/minnodes = 50 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.3] heuristics/rins/nodesquot = 0.3 # number of nodes without incumbent change that heuristic should wait # [type: int, advanced: TRUE, range: [0,2147483647], default: 200] heuristics/rins/nwaitingnodes = 200 # factor by which rins should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/rins/minimprove = 0.01 # minimum percentage of integer variables that have to be fixed # [type: real, advanced: FALSE, range: [0,1], default: 0.3] heuristics/rins/minfixingrate = 0.3 # factor by which the limit on the number of LP depends on the node limit # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 2] heuristics/rins/lplimfac = 2 # should subproblem be created out of the rows in the LP rows? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/rins/uselprows = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/rins/copycuts = TRUE # should uct node selection be used at the beginning of the search? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/rins/useuct = FALSE # priority of heuristic <rootsoldiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1005000] heuristics/rootsoldiving/priority = -1005000 # frequency for calling primal heuristic <rootsoldiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 20] heuristics/rootsoldiving/freq = 20 # frequency offset for calling primal heuristic <rootsoldiving> # [type: int, advanced: FALSE, range: [0,65534], default: 5] heuristics/rootsoldiving/freqofs = 5 # maximal depth level to call primal heuristic <rootsoldiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/rootsoldiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/rootsoldiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/rootsoldiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.01] heuristics/rootsoldiving/maxlpiterquot = 0.01 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/rootsoldiving/maxlpiterofs = 1000 # total number of feasible solutions found up to which heuristic is called (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] heuristics/rootsoldiving/maxsols = -1 # maximal diving depth: number of binary/integer variables times depthfac # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0.5] heuristics/rootsoldiving/depthfac = 0.5 # maximal diving depth factor if no feasible solution was found yet # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 2] 
heuristics/rootsoldiving/depthfacnosol = 2 # soft rounding factor to fade out objective coefficients # [type: real, advanced: TRUE, range: [0,1], default: 0.9] heuristics/rootsoldiving/alpha = 0.9 # priority of heuristic <rounding> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1000] heuristics/rounding/priority = -1000 # frequency for calling primal heuristic <rounding> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] heuristics/rounding/freq = 1 # frequency offset for calling primal heuristic <rounding> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/rounding/freqofs = 0 # maximal depth level to call primal heuristic <rounding> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/rounding/maxdepth = -1 # number of calls per found solution that are considered as standard success, a higher factor causes the heuristic to be called more often # [type: int, advanced: TRUE, range: [-1,2147483647], default: 100] heuristics/rounding/successfactor = 100 # should the heuristic only be called once per node? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/rounding/oncepernode = FALSE # priority of heuristic <shiftandpropagate> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 1000] heuristics/shiftandpropagate/priority = 1000 # frequency for calling primal heuristic <shiftandpropagate> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/shiftandpropagate/freq = 0 # frequency offset for calling primal heuristic <shiftandpropagate> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/shiftandpropagate/freqofs = 0 # maximal depth level to call primal heuristic <shiftandpropagate> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/shiftandpropagate/maxdepth = -1 # The number of propagation rounds used for each propagation # [type: int, advanced: TRUE, range: [-1,1000], default: 10] heuristics/shiftandpropagate/nproprounds = 10 # Should continuous variables be relaxed? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/shiftandpropagate/relax = TRUE # Should domains be reduced by probing? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/shiftandpropagate/probing = TRUE # Should heuristic only be executed if no primal solution was found, yet? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/shiftandpropagate/onlywithoutsol = TRUE # The number of cutoffs before heuristic stops # [type: int, advanced: TRUE, range: [-1,1000000], default: 15] heuristics/shiftandpropagate/cutoffbreaker = 15 # the key for variable sorting: (n)orms down, norms (u)p, (v)iolations down, viola(t)ions up, or (r)andom # [type: char, advanced: TRUE, range: {nrtuv}, default: v] heuristics/shiftandpropagate/sortkey = v # Should variables be sorted for the heuristic? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/shiftandpropagate/sortvars = TRUE # should variable statistics be collected during probing? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/shiftandpropagate/collectstats = TRUE # Should the heuristic stop calculating optimal shift values when no more rows are violated? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/shiftandpropagate/stopafterfeasible = TRUE # Should binary variables be shifted first? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/shiftandpropagate/preferbinaries = TRUE # should variables with a zero shifting value be delayed instead of being fixed? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/shiftandpropagate/nozerofixing = FALSE # should binary variables with no locks in one direction be fixed to that direction? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/shiftandpropagate/fixbinlocks = TRUE # should binary variables with no locks be preferred in the ordering? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/shiftandpropagate/binlocksfirst = FALSE # should coefficients and left/right hand sides be normalized by max row coeff? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/shiftandpropagate/normalize = TRUE # should row weight be increased every time the row is violated? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/shiftandpropagate/updateweights = FALSE # should implicit integer variables be treated as continuous variables? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/shiftandpropagate/impliscontinuous = TRUE # should the heuristic choose the best candidate in every round? (set to FALSE for static order)? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/shiftandpropagate/selectbest = FALSE # maximum percentage of allowed cutoffs before stopping the heuristic # [type: real, advanced: TRUE, range: [0,2], default: 0] heuristics/shiftandpropagate/maxcutoffquot = 0 # minimum fixing rate over all variables (including continuous) to solve LP # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/shiftandpropagate/minfixingratelp = 0 # priority of heuristic <shifting> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -5000] heuristics/shifting/priority = -5000 # frequency for calling primal heuristic <shifting> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/shifting/freq = 10 # frequency offset for calling primal heuristic <shifting> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/shifting/freqofs = 0 # maximal depth level to call primal heuristic <shifting> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/shifting/maxdepth = -1 # priority of heuristic <subnlp> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -2000000] heuristics/subnlp/priority = -2000000 # frequency for calling primal heuristic <subnlp> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] heuristics/subnlp/freq = 1 # frequency offset for calling primal heuristic <subnlp> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/subnlp/freqofs = 0 # maximal depth level to call primal heuristic <subnlp> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/subnlp/maxdepth = -1 # verbosity level of NLP solver # [type: int, advanced: FALSE, range: [0,65535], default: 0] heuristics/subnlp/nlpverblevel = 0 # number of nodes added to the current number of nodes when computing itercontingent (higher value runs 
heuristic more often in early search) # [type: int, advanced: FALSE, range: [0,2147483647], default: 1600] heuristics/subnlp/nodesoffset = 1600 # factor on number of nodes in SCIP (plus nodesoffset) to compute itercontingent (higher value runs heuristics more frequently) # [type: real, advanced: FALSE, range: [0,1e+20], default: 0.3] heuristics/subnlp/nodesfactor = 0.3 # exponent for power of success rate to be multiplied with itercontingent (lower value decreases impact of success rate) # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 1] heuristics/subnlp/successrateexp = 1 # number of iterations used for initial NLP solves # [type: int, advanced: FALSE, range: [0,2147483647], default: 300] heuristics/subnlp/iterinit = 300 # number of successful NLP solves until switching to iterlimit guess and using success rate # [type: int, advanced: FALSE, range: [0,2147483647], default: 2] heuristics/subnlp/ninitsolves = 2 # minimal number of iterations for NLP solves # [type: int, advanced: FALSE, range: [0,2147483647], default: 20] heuristics/subnlp/itermin = 20 # absolute optimality tolerance to use for NLP solves # [type: real, advanced: TRUE, range: [0,1], default: 1e-07] heuristics/subnlp/opttol = 1e-07 # factor on SCIP feasibility tolerance for NLP solves if resolving when NLP solution not feasible in CIP # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/subnlp/feastolfactor = 0.1 # limit on number of presolve rounds in sub-SCIP (-1 for unlimited, 0 for no presolve) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] heuristics/subnlp/maxpresolverounds = -1 # presolve emphasis in sub-SCIP (0: default, 1: aggressive, 2: fast, 3: off) # [type: int, advanced: FALSE, range: [0,3], default: 2] heuristics/subnlp/presolveemphasis = 2 # whether to set cutoff in sub-SCIP to current primal bound # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/subnlp/setcutoff = TRUE # whether to add 
constraints that forbid specific fixings that turned out to be infeasible # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/subnlp/forbidfixings = FALSE # whether to keep SCIP copy or to create new copy each time heuristic is applied # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/subnlp/keepcopy = TRUE # percentage of NLP solves with infeasible status required to tell NLP solver to expect an infeasible NLP # [type: real, advanced: FALSE, range: [0,1], default: 0] heuristics/subnlp/expectinfeas = 0 # priority of heuristic <trivial> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 10000] heuristics/trivial/priority = 10000 # frequency for calling primal heuristic <trivial> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/trivial/freq = 0 # frequency offset for calling primal heuristic <trivial> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/trivial/freqofs = 0 # maximal depth level to call primal heuristic <trivial> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/trivial/maxdepth = -1 # priority of heuristic <trivialnegation> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 40000] heuristics/trivialnegation/priority = 40000 # frequency for calling primal heuristic <trivialnegation> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/trivialnegation/freq = 0 # frequency offset for calling primal heuristic <trivialnegation> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/trivialnegation/freqofs = 0 # maximal depth level to call primal heuristic <trivialnegation> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: 0] heuristics/trivialnegation/maxdepth = 0 # priority of heuristic <trustregion> # [type: int, advanced: TRUE, range: 
[-536870912,536870911], default: -1102000] heuristics/trustregion/priority = -1102000 # frequency for calling primal heuristic <trustregion> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/trustregion/freq = -1 # frequency offset for calling primal heuristic <trustregion> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/trustregion/freqofs = 0 # maximal depth level to call primal heuristic <trustregion> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/trustregion/maxdepth = -1 # number of nodes added to the contingent of the total nodes # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/trustregion/nodesofs = 1000 # the number of binary variables necessary to run the heuristic # [type: int, advanced: FALSE, range: [1,2147483647], default: 10] heuristics/trustregion/minbinvars = 10 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.05] heuristics/trustregion/nodesquot = 0.05 # factor by which the limit on the number of LP depends on the node limit # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 1.5] heuristics/trustregion/lplimfac = 1.5 # minimum number of nodes required to start the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 100] heuristics/trustregion/minnodes = 100 # maximum number of nodes to regard in the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 10000] heuristics/trustregion/maxnodes = 10000 # number of nodes without incumbent change that heuristic should wait # [type: int, advanced: TRUE, range: [0,2147483647], default: 1] heuristics/trustregion/nwaitingnodes = 1 # should subproblem be created out of the rows in the LP rows? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/trustregion/uselprows = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/trustregion/copycuts = TRUE # limit on number of improving incumbent solutions in sub-CIP # [type: int, advanced: FALSE, range: [-1,2147483647], default: 3] heuristics/trustregion/bestsollimit = 3 # the penalty for each change in the binary variables from the candidate solution # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 100] heuristics/trustregion/violpenalty = 100 # the minimum absolute improvement in the objective function value # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.01] heuristics/trustregion/objminimprove = 0.01 # priority of heuristic <trysol> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -3000000] heuristics/trysol/priority = -3000000 # frequency for calling primal heuristic <trysol> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] heuristics/trysol/freq = 1 # frequency offset for calling primal heuristic <trysol> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/trysol/freqofs = 0 # maximal depth level to call primal heuristic <trysol> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/trysol/maxdepth = -1 # priority of heuristic <twoopt> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -20100] heuristics/twoopt/priority = -20100 # frequency for calling primal heuristic <twoopt> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/twoopt/freq = -1 # frequency offset for calling primal heuristic <twoopt> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/twoopt/freqofs = 0 # 
maximal depth level to call primal heuristic <twoopt> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/twoopt/maxdepth = -1 # Should Integer-2-Optimization be applied or not? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/twoopt/intopt = FALSE # user parameter to determine number of nodes to wait after last best solution before calling heuristic # [type: int, advanced: TRUE, range: [0,10000], default: 0] heuristics/twoopt/waitingnodes = 0 # maximum number of slaves for one master variable # [type: int, advanced: TRUE, range: [-1,1000000], default: 199] heuristics/twoopt/maxnslaves = 199 # parameter to determine the percentage of rows two variables have to share before they are considered equal # [type: real, advanced: TRUE, range: [0,1], default: 0.5] heuristics/twoopt/matchingrate = 0.5 # priority of heuristic <undercover> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1110000] heuristics/undercover/priority = -1110000 # frequency for calling primal heuristic <undercover> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/undercover/freq = 0 # frequency offset for calling primal heuristic <undercover> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/undercover/freqofs = 0 # maximal depth level to call primal heuristic <undercover> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/undercover/maxdepth = -1 # prioritized sequence of fixing values used ('l'p relaxation, 'n'lp relaxation, 'i'ncumbent solution) # [type: string, advanced: FALSE, default: "li"] heuristics/undercover/fixingalts = "li" # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 500] heuristics/undercover/maxnodes = 500 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, 
range: [0,9223372036854775807], default: 500] heuristics/undercover/minnodes = 500 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 500] heuristics/undercover/nodesofs = 500 # weight for conflict score in fixing order # [type: real, advanced: TRUE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 1000] heuristics/undercover/conflictweight = 1000 # weight for cutoff score in fixing order # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 1] heuristics/undercover/cutoffweight = 1 # weight for inference score in fixing order # [type: real, advanced: TRUE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 1] heuristics/undercover/inferenceweight = 1 # maximum coversize (as fraction of total number of variables) # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/undercover/maxcoversizevars = 1 # maximum coversize (as ratio to the percentage of non-affected constraints) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 1.79769313486232e+308] heuristics/undercover/maxcoversizeconss = 1.79769313486232e+308 # minimum percentage of nonlinear constraints in the original problem # [type: real, advanced: TRUE, range: [0,1], default: 0.15] heuristics/undercover/mincoveredrel = 0.15 # factor by which the heuristic should at least improve the incumbent # [type: real, advanced: TRUE, range: [-1,1], default: 0] heuristics/undercover/minimprove = 0 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/undercover/nodesquot = 0.1 # fraction of covering variables in the last cover which need to change their value when recovering # [type: real, advanced: TRUE, range: [0,1], default: 0.9] heuristics/undercover/recoverdiv = 0.9 # minimum number of nonlinear constraints in the original problem # [type: 
int, advanced: TRUE, range: [0,2147483647], default: 5] heuristics/undercover/mincoveredabs = 5 # maximum number of backtracks in fix-and-propagate # [type: int, advanced: TRUE, range: [0,2147483647], default: 6] heuristics/undercover/maxbacktracks = 6 # maximum number of recoverings # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] heuristics/undercover/maxrecovers = 0 # maximum number of reorderings of the fixing order # [type: int, advanced: TRUE, range: [0,2147483647], default: 1] heuristics/undercover/maxreorders = 1 # objective function of the covering problem (influenced nonlinear 'c'onstraints/'t'erms, 'd'omain size, 'l'ocks, 'm'in of up/down locks, 'u'nit penalties) # [type: char, advanced: TRUE, range: {cdlmtu}, default: u] heuristics/undercover/coveringobj = u # order in which variables should be fixed (increasing 'C'onflict score, decreasing 'c'onflict score, increasing 'V'ariable index, decreasing 'v'ariable index) # [type: char, advanced: TRUE, range: {CcVv}, default: v] heuristics/undercover/fixingorder = v # should the heuristic be called at root node before cut separation? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/undercover/beforecuts = TRUE # should integer variables in the cover be fixed first? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/undercover/fixintfirst = FALSE # shall LP values for integer vars be rounded according to locks? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/undercover/locksrounding = TRUE # should we only fix variables in order to obtain a convex problem? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/undercover/onlyconvexify = FALSE # should the NLP heuristic be called to polish a feasible solution? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/undercover/postnlp = TRUE # should bounddisjunction constraints be covered (or just copied)? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/undercover/coverbd = FALSE # should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/undercover/copycuts = TRUE # shall the cover be reused if a conflict was added after an infeasible subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/undercover/reusecover = FALSE # priority of heuristic <vbounds> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 2500] heuristics/vbounds/priority = 2500 # frequency for calling primal heuristic <vbounds> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/vbounds/freq = 0 # frequency offset for calling primal heuristic <vbounds> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/vbounds/freqofs = 0 # maximal depth level to call primal heuristic <vbounds> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/vbounds/maxdepth = -1 # minimum percentage of integer variables that have to be fixed # [type: real, advanced: FALSE, range: [0,1], default: 0.65] heuristics/vbounds/minintfixingrate = 0.65 # minimum percentage of variables that have to be fixed within sub-SCIP (integer and continuous) # [type: real, advanced: FALSE, range: [0,1], default: 0.65] heuristics/vbounds/minmipfixingrate = 0.65 # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 5000] heuristics/vbounds/maxnodes = 5000 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 500] heuristics/vbounds/nodesofs = 500 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 500] 
heuristics/vbounds/minnodes = 500 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/vbounds/nodesquot = 0.1 # factor by which vbounds heuristic should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/vbounds/minimprove = 0.01 # maximum number of propagation rounds during probing (-1 infinity) # [type: int, advanced: TRUE, range: [-1,536870911], default: 2] heuristics/vbounds/maxproprounds = 2 # should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/vbounds/copycuts = TRUE # should more variables be fixed based on variable locks if the fixing rate was not reached? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/vbounds/uselockfixings = FALSE # maximum number of backtracks during the fixing process # [type: int, advanced: TRUE, range: [-1,536870911], default: 10] heuristics/vbounds/maxbacktracks = 10 # which variants of the vbounds heuristic that try to stay feasible should be called? (0: off, 1: w/o looking at obj, 2: only fix to best bound, 4: only fix to worst bound) # [type: int, advanced: TRUE, range: [0,7], default: 6] heuristics/vbounds/feasvariant = 6 # which tightening variants of the vbounds heuristic should be called? 
(0: off, 1: w/o looking at obj, 2: only fix to best bound, 4: only fix to worst bound) # [type: int, advanced: TRUE, range: [0,7], default: 7] heuristics/vbounds/tightenvariant = 7 # priority of heuristic <veclendiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1003100] heuristics/veclendiving/priority = -1003100 # frequency for calling primal heuristic <veclendiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/veclendiving/freq = 10 # frequency offset for calling primal heuristic <veclendiving> # [type: int, advanced: FALSE, range: [0,65534], default: 4] heuristics/veclendiving/freqofs = 4 # maximal depth level to call primal heuristic <veclendiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/veclendiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/veclendiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/veclendiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/veclendiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/veclendiving/maxlpiterofs = 1000 # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/veclendiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/veclendiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution 
was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/veclendiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/veclendiving/maxdiveavgquotnosol = 0 # use one level of backtracking if infeasibility is encountered? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/veclendiving/backtrack = TRUE # percentage of immediate domain changes during probing to trigger LP resolve # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.15] heuristics/veclendiving/lpresolvedomchgquot = 0.15 # LP solve frequency for diving heuristics (0: only after enough domain changes have been found) # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] heuristics/veclendiving/lpsolvefreq = 0 # should only LP branching candidates be considered instead of the slower but more general constraint handler diving variable selection? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/veclendiving/onlylpbranchcands = FALSE # priority of heuristic <zirounding> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -500] heuristics/zirounding/priority = -500 # frequency for calling primal heuristic <zirounding> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] heuristics/zirounding/freq = 1 # frequency offset for calling primal heuristic <zirounding> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/zirounding/freqofs = 0 # maximal depth level to call primal heuristic <zirounding> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/zirounding/maxdepth = -1 # determines maximum number of rounding loops # [type: int, advanced: TRUE, range: [-1,2147483647], default: 2] heuristics/zirounding/maxroundingloops = 2 # flag to determine if Zirounding is deactivated after a certain percentage of unsuccessful calls # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/zirounding/stopziround = TRUE # if percentage of found solutions falls below this parameter, Zirounding will be deactivated # [type: real, advanced: TRUE, range: [0,1], default: 0.02] heuristics/zirounding/stoppercentage = 0.02 # determines the minimum number of calls before percentage-based deactivation of Zirounding is applied # [type: int, advanced: TRUE, range: [1,2147483647], default: 1000] heuristics/zirounding/minstopncalls = 1000 # priority of heuristic <zeroobj> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 100] heuristics/zeroobj/priority = 100 # frequency for calling primal heuristic <zeroobj> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/zeroobj/freq = -1 # frequency offset for calling primal heuristic <zeroobj> # [type: int, advanced: FALSE, range: [0,65534], default: 0] 
heuristics/zeroobj/freqofs = 0 # maximal depth level to call primal heuristic <zeroobj> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: 0] heuristics/zeroobj/maxdepth = 0 # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 1000] heuristics/zeroobj/maxnodes = 1000 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 100] heuristics/zeroobj/nodesofs = 100 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 100] heuristics/zeroobj/minnodes = 100 # maximum number of LP iterations to be performed in the subproblem # [type: longint, advanced: TRUE, range: [-1,9223372036854775807], default: 5000] heuristics/zeroobj/maxlpiters = 5000 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/zeroobj/nodesquot = 0.1 # factor by which zeroobj should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/zeroobj/minimprove = 0.01 # should all subproblem solutions be added to the original SCIP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/zeroobj/addallsols = FALSE # should heuristic only be executed if no primal solution was found, yet? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/zeroobj/onlywithoutsol = TRUE # should uct node selection be used at the beginning of the search? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/zeroobj/useuct = FALSE # priority of heuristic <simplerounding> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 0] heuristics/simplerounding/priority = 0 # frequency for calling primal heuristic <simplerounding> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] heuristics/simplerounding/freq = 1 # frequency offset for calling primal heuristic <simplerounding> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/simplerounding/freqofs = 0 # maximal depth level to call primal heuristic <simplerounding> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/simplerounding/maxdepth = -1 # should the heuristic only be called once per node? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/simplerounding/oncepernode = FALSE # priority of separator <clique> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -5000] separating/clique/priority = -5000 # frequency for calling separator <clique> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] separating/clique/freq = 0 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <clique> (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 0] separating/clique/maxbounddist = 0 # should separator be delayed, if other separators found cuts? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/clique/delay = FALSE # base for exponential increase of frequency at which separator <clique> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/clique/expbackoff = 4 # factor for scaling weights # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 1000] separating/clique/scaleval = 1000 # maximal number of nodes in branch and bound tree (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10000] separating/clique/maxtreenodes = 10000 # frequency for premature backtracking up to tree level 1 (0: no backtracking) # [type: int, advanced: TRUE, range: [0,2147483647], default: 1000] separating/clique/backtrackfreq = 1000 # maximal number of clique cuts separated per separation round (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 10] separating/clique/maxsepacuts = 10 # maximal number of zero-valued variables extending the clique (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 1000] separating/clique/maxzeroextensions = 1000 # maximal memory size of dense clique table (in kb) # [type: real, advanced: TRUE, range: [0,2097151.99902344], default: 20000] separating/clique/cliquetablemem = 20000 # minimal density of cliques to use a dense clique table # [type: real, advanced: TRUE, range: [0,1], default: 0] separating/clique/cliquedensity = 0 # priority of separator <gomory> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1000] separating/gomory/priority = -1000 # frequency for calling separator <gomory> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] separating/gomory/freq = 10 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <gomory> (0.0: only on current best node, 
1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 1] separating/gomory/maxbounddist = 1 # should separator be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/gomory/delay = FALSE # base for exponential increase of frequency at which separator <gomory> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/gomory/expbackoff = 4 # priority of separator <strongcg> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -100000] separating/strongcg/priority = -100000 # frequency for calling separator <strongcg> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] separating/strongcg/freq = 10 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <strongcg> (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 0] separating/strongcg/maxbounddist = 0 # should separator be delayed, if other separators found cuts? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/strongcg/delay = FALSE # base for exponential increase of frequency at which separator <strongcg> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/strongcg/expbackoff = 4 # priority of separator <gomorymi> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -100000] separating/gomorymi/priority = -100000 # frequency for calling separator <gomorymi> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] separating/gomorymi/freq = 10 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <gomorymi> (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 0] separating/gomorymi/maxbounddist = 0 # should separator be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/gomorymi/delay = FALSE # base for exponential increase of frequency at which separator <gomorymi> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/gomorymi/expbackoff = 4 # maximal number of gomory separation rounds per node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 5] separating/gomory/maxrounds = 5 # maximal number of gomory separation rounds in the root node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 10] separating/gomory/maxroundsroot = 10 # maximal number of gomory cuts separated per separation round # [type: int, advanced: FALSE, range: [0,2147483647], default: 50] separating/gomory/maxsepacuts = 50 # maximal number of gomory cuts separated per separation round in the root node # [type: int, advanced: FALSE, range: [0,2147483647], default: 200] 
separating/gomory/maxsepacutsroot = 200 # maximal rank of a gomory cut that could not be scaled to integral coefficients (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] separating/gomory/maxrank = -1 # maximal rank of a gomory cut that could be scaled to integral coefficients (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] separating/gomory/maxrankintegral = -1 # minimal integrality violation of a basis variable in order to try Gomory cut # [type: real, advanced: FALSE, range: [0.0001,0.5], default: 0.01] separating/gomory/away = 0.01 # should generated cuts be removed from the LP if they are no longer tight? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] separating/gomory/dynamiccuts = TRUE # try to scale cuts to integral coefficients # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/gomory/makeintegral = FALSE # if conversion to integral coefficients failed still consider the cut # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/gomory/forcecuts = TRUE # separate rows with integral slack # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/gomory/separaterows = TRUE # should cuts be added to the delayed cut pool? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/gomory/delayedcuts = FALSE # choose side types of row (lhs/rhs) based on basis information? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/gomory/sidetypebasis = TRUE # try to generate strengthened Chvatal-Gomory cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/gomory/trystrongcg = TRUE # Should both Gomory and strong CG cuts be generated (otherwise take best)? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/gomory/genbothgomscg = TRUE # priority of separator <impliedbounds> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -50] separating/impliedbounds/priority = -50 # frequency for calling separator <impliedbounds> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] separating/impliedbounds/freq = 10 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <impliedbounds> (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 1] separating/impliedbounds/maxbounddist = 1 # should separator be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/impliedbounds/delay = FALSE # base for exponential increase of frequency at which separator <impliedbounds> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/impliedbounds/expbackoff = 4 # should violated inequalities for cliques with 2 variables be separated? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/impliedbounds/usetwosizecliques = TRUE # priority of separator <interminor> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 0] separating/interminor/priority = 0 # frequency for calling separator <interminor> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] separating/interminor/freq = -1 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <interminor> (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 1] separating/interminor/maxbounddist = 1 # should separator be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/interminor/delay = FALSE # base for exponential increase of frequency at which separator <interminor> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/interminor/expbackoff = 4 # whether to use strengthened intersection cuts to separate minors # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] separating/interminor/usestrengthening = FALSE # whether to also enforce nonnegativity bounds of principal minors # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] separating/interminor/usebounds = FALSE # minimum required violation of a cut # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.0001] separating/interminor/mincutviol = 0.0001 # maximal number of separation rounds per node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 10] separating/interminor/maxrounds = 10 # maximal number of separation rounds in the root node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] separating/interminor/maxroundsroot = -1 # 
priority of separator <intobj> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -100] separating/intobj/priority = -100 # frequency for calling separator <intobj> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] separating/intobj/freq = -1 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <intobj> (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 0] separating/intobj/maxbounddist = 0 # should separator be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/intobj/delay = FALSE # base for exponential increase of frequency at which separator <intobj> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/intobj/expbackoff = 4 # priority of separator <mcf> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -10000] separating/mcf/priority = -10000 # frequency for calling separator <mcf> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] separating/mcf/freq = 0 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <mcf> (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 0] separating/mcf/maxbounddist = 0 # should separator be delayed, if other separators found cuts? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/mcf/delay = FALSE # base for exponential increase of frequency at which separator <mcf> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/mcf/expbackoff = 4 # number of clusters to generate in the shrunken network -- default separation # [type: int, advanced: TRUE, range: [2,32], default: 5] separating/mcf/nclusters = 5 # maximal valid range max(|weights|)/min(|weights|) of row weights # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 1000000] separating/mcf/maxweightrange = 1000000 # maximal number of different deltas to try (-1: unlimited) -- default separation # [type: int, advanced: TRUE, range: [-1,2147483647], default: 20] separating/mcf/maxtestdelta = 20 # should negative values also be tested in scaling? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/mcf/trynegscaling = FALSE # should an additional variable be complemented if f0 = 0? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/mcf/fixintegralrhs = TRUE # should generated cuts be removed from the LP if they are no longer tight? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] separating/mcf/dynamiccuts = TRUE # model type of network (0: auto, 1:directed, 2:undirected) # [type: int, advanced: TRUE, range: [0,2], default: 0] separating/mcf/modeltype = 0 # maximal number of mcf cuts separated per separation round # [type: int, advanced: FALSE, range: [-1,2147483647], default: 100] separating/mcf/maxsepacuts = 100 # maximal number of mcf cuts separated per separation round in the root node -- default separation # [type: int, advanced: FALSE, range: [-1,2147483647], default: 200] separating/mcf/maxsepacutsroot = 200 # maximum inconsistency ratio for separation at all # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0.02] separating/mcf/maxinconsistencyratio = 0.02 # maximum inconsistency ratio of arcs not to be deleted # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0.5] separating/mcf/maxarcinconsistencyratio = 0.5 # should we separate only if the cuts shores are connected? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/mcf/checkcutshoreconnectivity = TRUE # should we separate inequalities based on single-node cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/mcf/separatesinglenodecuts = TRUE # should we separate flowcutset inequalities on the network cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/mcf/separateflowcutset = TRUE # should we separate knapsack cover inequalities on the network cuts? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/mcf/separateknapsack = TRUE # priority of separator <minor> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 0] separating/minor/priority = 0 # frequency for calling separator <minor> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] separating/minor/freq = 10 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <minor> (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 1] separating/minor/maxbounddist = 1 # should separator be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/minor/delay = FALSE # base for exponential increase of frequency at which separator <minor> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/minor/expbackoff = 4 # constant for the maximum number of minors, i.e., max(const, fac * # quadratic terms) # [type: int, advanced: FALSE, range: [0,2147483647], default: 3000] separating/minor/maxminorsconst = 3000 # factor for the maximum number of minors, i.e., max(const, fac * # quadratic terms) # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 10] separating/minor/maxminorsfac = 10 # minimum required violation of a cut # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.0001] separating/minor/mincutviol = 0.0001 # maximal number of separation rounds per node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 10] separating/minor/maxrounds = 10 # maximal number of separation rounds in the root node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] separating/minor/maxroundsroot = -1 # whether to ignore circle packing 
constraints during minor detection # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] separating/minor/ignorepackingconss = TRUE # priority of separator <mixing> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -50] separating/mixing/priority = -50 # frequency for calling separator <mixing> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] separating/mixing/freq = 10 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <mixing> (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 1] separating/mixing/maxbounddist = 1 # should separator be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/mixing/delay = FALSE # base for exponential increase of frequency at which separator <mixing> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/mixing/expbackoff = 4 # Should local bounds be used? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/mixing/uselocalbounds = FALSE # Should general integer variables be used to generate cuts? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/mixing/iscutsonints = FALSE # maximal number of mixing separation rounds per node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] separating/mixing/maxrounds = -1 # maximal number of mixing separation rounds in the root node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] separating/mixing/maxroundsroot = -1 # maximal number of consecutive unsuccessful iterations # [type: int, advanced: FALSE, range: [-1,2147483647], default: 10] separating/mixing/maxnunsuccessful = 10 # priority of separator <oddcycle> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -15000] separating/oddcycle/priority = -15000 # frequency for calling separator <oddcycle> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] separating/oddcycle/freq = -1 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <oddcycle> (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 1] separating/oddcycle/maxbounddist = 1 # should separator be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/oddcycle/delay = FALSE # base for exponential increase of frequency at which separator <oddcycle> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/oddcycle/expbackoff = 4 # Should the search method by Groetschel, Lovasz, Schrijver be used? Otherwise use levelgraph method by Hoffman, Padberg. # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] separating/oddcycle/usegls = TRUE # Should odd cycle cuts be lifted? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] separating/oddcycle/liftoddcycles = FALSE # maximal number of oddcycle cuts separated per separation round # [type: int, advanced: FALSE, range: [0,2147483647], default: 5000] separating/oddcycle/maxsepacuts = 5000 # maximal number of oddcycle cuts separated per separation round in the root node # [type: int, advanced: FALSE, range: [0,2147483647], default: 5000] separating/oddcycle/maxsepacutsroot = 5000 # maximal number of oddcycle separation rounds per node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 10] separating/oddcycle/maxrounds = 10 # maximal number of oddcycle separation rounds in the root node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 10] separating/oddcycle/maxroundsroot = 10 # factor for scaling of the arc-weights # [type: int, advanced: TRUE, range: [1,2147483647], default: 1000] separating/oddcycle/scalingfactor = 1000 # add links between a variable and its negated # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/oddcycle/addselfarcs = TRUE # try to repair violated cycles with double appearance of a variable # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/oddcycle/repaircycles = TRUE # separate triangles found as 3-cycles or repaired larger cycles # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/oddcycle/includetriangles = TRUE # Even if a variable is already covered by a cut, still try it as start node for a cycle search? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/oddcycle/multiplecuts = FALSE # Even if a variable is already covered by a cut, still allow another cut to cover it too? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/oddcycle/allowmultiplecuts = TRUE # Choose lifting candidate by coef*lpvalue or only by coef? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/oddcycle/lpliftcoef = FALSE # Calculate lifting coefficient of every candidate in every step (or only if it's chosen)? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/oddcycle/recalcliftcoef = TRUE # use sorted variable array (unsorted(0), maxlp(1), minlp(2), maxfrac(3), minfrac(4)) # [type: int, advanced: TRUE, range: [0,4], default: 3] separating/oddcycle/sortswitch = 3 # sort level of the root neighbors by fractionality (maxfrac) # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/oddcycle/sortrootneighbors = TRUE # percentage of variables to try the chosen method on [0-100] # [type: int, advanced: TRUE, range: [0,100], default: 0] separating/oddcycle/percenttestvars = 0 # offset of variables to try the chosen method on (additional to the percentage of testvars) # [type: int, advanced: TRUE, range: [0,2147483647], default: 100] separating/oddcycle/offsettestvars = 100 # percentage of nodes allowed in the same level of the level graph [0-100] # [type: int, advanced: TRUE, range: [0,100], default: 100] separating/oddcycle/maxpernodeslevel = 100 # offset of nodes allowed in the same level of the level graph (additional to the percentage of levelnodes) # [type: int, advanced: TRUE, range: [0,2147483647], default: 10] separating/oddcycle/offsetnodeslevel = 10 # maximal number of levels in level graph # [type: int, advanced: TRUE, range: [0,2147483647], default: 20] separating/oddcycle/maxnlevels = 20 # maximal number of oddcycle cuts generated per chosen variable as root of the level graph # [type: int, advanced: TRUE, range: [0,2147483647], default: 1] separating/oddcycle/maxcutsroot = 1 # maximal number of oddcycle cuts generated in every level of the level graph # [type: int, advanced: TRUE, range: [0,2147483647], default: 50] separating/oddcycle/maxcutslevel = 50 # minimal weight on an edge (in level graph or bipartite graph) # 
[type: int, advanced: TRUE, range: [0,2147483647], default: 0] separating/oddcycle/maxreference = 0 # number of unsuccessful calls at current node # [type: int, advanced: TRUE, range: [0,2147483647], default: 3] separating/oddcycle/maxunsucessfull = 3 # maximal number of other cuts s.t. separation is applied (-1 for direct call) # [type: int, advanced: TRUE, range: [-1,2147483647], default: -1] separating/oddcycle/cutthreshold = -1 # priority of separator <zerohalf> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -6000] separating/zerohalf/priority = -6000 # frequency for calling separator <zerohalf> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] separating/zerohalf/freq = 10 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <zerohalf> (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 1] separating/zerohalf/maxbounddist = 1 # should separator be delayed, if other separators found cuts? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/zerohalf/delay = FALSE # base for exponential increase of frequency at which separator <zerohalf> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/zerohalf/expbackoff = 4 # maximal number of zerohalf separation rounds per node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 5] separating/zerohalf/maxrounds = 5 # maximal number of zerohalf separation rounds in the root node (-1: unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 20] separating/zerohalf/maxroundsroot = 20 # maximal number of zerohalf cuts separated per separation round # [type: int, advanced: FALSE, range: [0,2147483647], default: 20] separating/zerohalf/maxsepacuts = 20 # initial seed used for random tie-breaking in cut selection # [type: int, advanced: FALSE, range: [0,2147483647], default: 24301] separating/zerohalf/initseed = 24301 # maximal number of zerohalf cuts separated per separation round in the root node # [type: int, advanced: FALSE, range: [0,2147483647], default: 100] separating/zerohalf/maxsepacutsroot = 100 # maximal number of zerohalf cuts considered per separation round # [type: int, advanced: FALSE, range: [0,2147483647], default: 2000] separating/zerohalf/maxcutcands = 2000 # maximal slack of rows to be used in aggregation # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] separating/zerohalf/maxslack = 0 # maximal slack of rows to be used in aggregation in the root node # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] separating/zerohalf/maxslackroot = 0 # threshold for score of cut relative to best score to be considered good, so that less strict filtering is applied # [type: real, advanced: TRUE, range: [0,1], default: 1] separating/zerohalf/goodscore = 1 # threshold for score of cut relative to best score to be discarded 
# [type: real, advanced: TRUE, range: [0,1], default: 0.5] separating/zerohalf/badscore = 0.5 # weight of objective parallelism in cut score calculation # [type: real, advanced: TRUE, range: [0,1], default: 0] separating/zerohalf/objparalweight = 0 # weight of efficacy in cut score calculation # [type: real, advanced: TRUE, range: [0,1], default: 1] separating/zerohalf/efficacyweight = 1 # weight of directed cutoff distance in cut score calculation # [type: real, advanced: TRUE, range: [0,1], default: 0] separating/zerohalf/dircutoffdistweight = 0 # maximum parallelism for good cuts # [type: real, advanced: TRUE, range: [0,1], default: 0.1] separating/zerohalf/goodmaxparall = 0.1 # maximum parallelism for non-good cuts # [type: real, advanced: TRUE, range: [0,1], default: 0.1] separating/zerohalf/maxparall = 0.1 # minimal violation to generate zerohalfcut for # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0.1] separating/zerohalf/minviol = 0.1 # should generated cuts be removed from the LP if they are no longer tight? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] separating/zerohalf/dynamiccuts = TRUE # maximal density of row to be used in aggregation # [type: real, advanced: TRUE, range: [0,1], default: 0.05] separating/zerohalf/maxrowdensity = 0.05 # additional number of variables allowed in row on top of density # [type: int, advanced: TRUE, range: [0,2147483647], default: 100] separating/zerohalf/densityoffset = 100 # priority of separator <closecuts> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 1000000] separating/closecuts/priority = 1000000 # frequency for calling separator <closecuts> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] separating/closecuts/freq = -1 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <closecuts> (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 1] separating/closecuts/maxbounddist = 1 # should separator be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/closecuts/delay = FALSE # base for exponential increase of frequency at which separator <closecuts> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/closecuts/expbackoff = 4 # generate close cuts w.r.t. relative interior point (best solution otherwise)? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/closecuts/separelint = TRUE # convex combination value for close cuts # [type: real, advanced: TRUE, range: [0,1], default: 0.3] separating/closecuts/sepacombvalue = 0.3 # threshold on number of generated cuts below which the ordinary separation is started # [type: int, advanced: TRUE, range: [-1,2147483647], default: 50] separating/closecuts/closethres = 50 # include an objective cutoff when computing the relative interior? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/closecuts/inclobjcutoff = FALSE # recompute relative interior point in each separation call? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/closecuts/recomputerelint = FALSE # turn off separation in current node after unsuccessful calls (-1 never turn off) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 0] separating/closecuts/maxunsuccessful = 0 # factor for maximal LP iterations in relative interior computation compared to node LP iterations (negative for no limit) # [type: real, advanced: TRUE, range: [-1,1.79769313486232e+308], default: 10] separating/closecuts/maxlpiterfactor = 10 # priority of separator <rapidlearning> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1200000] separating/rapidlearning/priority = -1200000 # frequency for calling separator <rapidlearning> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 5] separating/rapidlearning/freq = 5 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying separator <rapidlearning> (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: TRUE, range: [0,1], default: 1] separating/rapidlearning/maxbounddist = 1 # should separator be delayed, if other separators found cuts? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/rapidlearning/delay = FALSE # base for exponential increase of frequency at which separator <rapidlearning> is called (1: call at each multiple of frequency) # [type: int, advanced: TRUE, range: [1,100], default: 4] separating/rapidlearning/expbackoff = 4 # should the found conflicts be applied in the original SCIP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/rapidlearning/applyconflicts = TRUE # should the found global bound deductions be applied in the original SCIP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/rapidlearning/applybdchgs = TRUE # should the inference values be used as initialization in the original SCIP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/rapidlearning/applyinfervals = TRUE # should the inference values only be used when rapidlearning found other reductions? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/rapidlearning/reducedinfer = FALSE # should the incumbent solution be copied to the original SCIP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/rapidlearning/applyprimalsol = TRUE # should a solved status be copied to the original SCIP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/rapidlearning/applysolved = TRUE # should local LP degeneracy be checked? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/rapidlearning/checkdegeneracy = TRUE # should the progress on the dual bound be checked? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/rapidlearning/checkdualbound = FALSE # should the ratio of leaves proven to be infeasible and exceeding the cutoff bound be checked? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/rapidlearning/checkleaves = FALSE # check whether rapid learning should be executed # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/rapidlearning/checkexec = TRUE # should the (local) objective function be checked? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/rapidlearning/checkobj = FALSE # should the number of solutions found so far be checked? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/rapidlearning/checknsols = TRUE # should rapid learning be applied when there are continuous variables? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] separating/rapidlearning/contvars = FALSE # maximal portion of continuous variables to apply rapid learning # [type: real, advanced: TRUE, range: [0,1], default: 0.3] separating/rapidlearning/contvarsquot = 0.3 # maximal fraction of LP iterations compared to node LP iterations # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0.2] separating/rapidlearning/lpiterquot = 0.2 # minimal degeneracy threshold to allow local rapid learning # [type: real, advanced: TRUE, range: [0,1], default: 0.7] separating/rapidlearning/mindegeneracy = 0.7 # minimal threshold of inf/obj leaves to allow local rapid learning # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 10] separating/rapidlearning/mininflpratio = 10 # minimal ratio of unfixed variables in relation to basis size to allow local rapid learning # [type: real, advanced: TRUE, range: [1,1.79769313486232e+308], default: 2] separating/rapidlearning/minvarconsratio = 2 # maximum problem size (variables) for which rapid learning will be called # [type: int, advanced: TRUE, range: [0,2147483647], default: 10000] separating/rapidlearning/maxnvars = 10000 # maximum problem size (constraints) for which rapid learning will be called # [type: int, advanced: 
TRUE, range: [0,2147483647], default: 10000] separating/rapidlearning/maxnconss = 10000 # maximum number of overall calls # [type: int, advanced: TRUE, range: [0,2147483647], default: 100] separating/rapidlearning/maxcalls = 100 # maximum number of nodes considered in rapid learning run # [type: int, advanced: TRUE, range: [0,2147483647], default: 5000] separating/rapidlearning/maxnodes = 5000 # minimum number of nodes considered in rapid learning run # [type: int, advanced: TRUE, range: [0,2147483647], default: 500] separating/rapidlearning/minnodes = 500 # number of nodes that should be processed before rapid learning is executed locally based on the progress of the dualbound # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 100] separating/rapidlearning/nwaitingnodes = 100 # should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] separating/rapidlearning/copycuts = TRUE # priority of cut selection rule <hybrid> # [type: int, advanced: FALSE, range: [-536870912,1073741823], default: 8000] cutselection/hybrid/priority = 8000 # weight of efficacy in cut score calculation # [type: real, advanced: FALSE, range: [0,1e+98], default: 1] cutselection/hybrid/efficacyweight = 1 # weight of directed cutoff distance in cut score calculation # [type: real, advanced: FALSE, range: [0,1e+98], default: 0] cutselection/hybrid/dircutoffdistweight = 0 # weight of objective parallelism in cut score calculation # [type: real, advanced: FALSE, range: [0,1e+98], default: 0.1] cutselection/hybrid/objparalweight = 0.1 # weight of integral support in cut score calculation # [type: real, advanced: FALSE, range: [0,1e+98], default: 0.1] cutselection/hybrid/intsupportweight = 0.1 # minimal orthogonality for a cut to enter the LP # [type: real, advanced: FALSE, range: [0,1], default: 0.9] cutselection/hybrid/minortho = 0.9 # minimal orthogonality for a cut to enter the LP in the 
root node # [type: real, advanced: FALSE, range: [0,1], default: 0.9] cutselection/hybrid/minorthoroot = 0.9 # if true no nonzeros are shown (may improve performance) # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] visual/draftmode = FALSE # type number: 0=default, 1=black and white, 2=manual # [type: int, advanced: FALSE, range: [0,2], default: 0] visual/colorscheme = 0 # integer value to scale points on range 1-10 # [type: int, advanced: FALSE, range: [1,10], default: 2] visual/nonzeroradius = 2 # maximum number of decompositions to write (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] visual/nmaxdecompstowrite = -1 # pdf reader that opens visualizations in decomposition explorer # [type: string, advanced: FALSE, default: "xdg-open"] visual/pdfreader = "xdg-open" # color for master variables in hex code # [type: string, advanced: FALSE, default: "#1340C7"] visual/colors/colormastervars = "#1340C7" # color for master constraints in hex code # [type: string, advanced: FALSE, default: "#1340C7"] visual/colors/colormasterconss = "#1340C7" # color for linking variables in hex code # [type: string, advanced: FALSE, default: "#FFB72D"] visual/colors/colorlinking = "#FFB72D" # color for stairlinking variables in hex code # [type: string, advanced: FALSE, default: "#886100"] visual/colors/colorstairlinking = "#886100" # color for found blocks in hex code # [type: string, advanced: FALSE, default: "#718CDB"] visual/colors/colorblock = "#718CDB" # color for open areas in hex code # [type: string, advanced: FALSE, default: "#FFD88F"] visual/colors/coloropen = "#FFD88F" # color for nonzeros in hex code # [type: string, advanced: FALSE, default: "#000000"] visual/colors/colornonzeros = "#000000" # color for lines in hex code # [type: string, advanced: FALSE, default: "#000000"] visual/colors/colorlines = "#000000" # maximum number of decompositions shown in report (best scores first) # [type: int, advanced: FALSE, range: 
[1,2147483647], default: 20] visual/report/maxndecomps = 20 # if true a title page is included # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] visual/report/showtitle = TRUE # if true a table of contents is included # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] visual/report/showtoc = TRUE # if true statistics are included for each decomp # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] visual/report/showstatistics = TRUE # if true gnuplot is used for sub-visualizations in report, otherwise LaTeX/Tikz # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] visual/report/usegp = FALSE # priority of relaxation handler <gcg> # [type: int, advanced: FALSE, range: [-536870912,536870911], default: -1] relaxing/gcg/priority = -1 # frequency for calling relaxation handler <gcg> (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] relaxing/gcg/freq = 1 # maximal number of columns per Farkas pricing round # [type: int, advanced: FALSE, range: [1,2147483647], default: 10] pricing/masterpricer/maxcolsroundfarkas = 10 # maximal number of columns per problem to be generated during Farkas pricing # [type: int, advanced: FALSE, range: [1,2147483647], default: 10] pricing/masterpricer/maxcolsprobfarkas = 10 # maximal percentage of Farkas pricing problems that are solved if variables have already been found # [type: real, advanced: FALSE, range: [0,1], default: 1] pricing/masterpricer/relmaxprobsfarkas = 1 # maximal number of pricing rounds per node after the root node # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] pricing/masterpricer/maxroundsredcost = 2147483647 # maximal number of columns per reduced cost pricing round at root node # [type: int, advanced: FALSE, range: [0,2147483647], default: 100] pricing/masterpricer/maxcolsroundredcostroot = 100 # maximal number of columns per reduced cost pricing round # [type: int, advanced: FALSE, 
range: [0,2147483647], default: 100] pricing/masterpricer/maxcolsroundredcost = 100 # maximal number of columns per problem to be generated during red. cost pricing at root node # [type: int, advanced: FALSE, range: [0,2147483647], default: 10] pricing/masterpricer/maxcolsprobredcostroot = 10 # maximal number of columns per problem to be generated during red. cost pricing # [type: int, advanced: FALSE, range: [0,2147483647], default: 10] pricing/masterpricer/maxcolsprobredcost = 10 # maximal number of successfully solved red. cost pricing problems until pricing loop is aborted # [type: int, advanced: FALSE, range: [1,2147483647], default: 2147483647] pricing/masterpricer/maxsuccessfulprobsredcost = 2147483647 # maximal percentage of red. cost pricing problems that are solved at root node if variables have already been found # [type: real, advanced: FALSE, range: [0,1], default: 1] pricing/masterpricer/relmaxprobsredcostroot = 1 # maximal percentage of red. cost pricing problems that are solved if variables have already been found # [type: real, advanced: FALSE, range: [0,1], default: 1] pricing/masterpricer/relmaxprobsredcost = 1 # maximal percentage of successfully solved red. 
cost pricing problems until pricing loop is aborted # [type: real, advanced: FALSE, range: [0,1], default: 1] pricing/masterpricer/relmaxsuccessfulprobsredcost = 1 # maximum number of heuristic pricing iterations per pricing call and problem # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] pricing/masterpricer/heurpricingiters = 1 # maximum depth at which heuristic pricing should be performed (-1 for infinity) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] pricing/masterpricer/maxheurdepth = -1 # order by which the pricing problems should be sorted ('i'ndices, 'd'ual solutions of convexity constraints, 'r'eliability from previous rounds, reliability from the 'l'ast nroundscol rounds) # [type: char, advanced: FALSE, range: {dilr}, default: r] pricing/masterpricer/sorting = r # number of previous pricing rounds for which the number of improving columns should be counted # [type: int, advanced: TRUE, range: [1,2147483647], default: 15] pricing/masterpricer/nroundscol = 15 # maximal number of pricing problems to be solved during one pricing loop # [type: int, advanced: TRUE, range: [1,2147483647], default: 2147483647] pricing/masterpricer/chunksize = 2147483647 # frequency at which all pricingproblems should be solved (0 to disable) # [type: int, advanced: FALSE, range: [0,2147483647], default: 10] pricing/masterpricer/eagerfreq = 10 # should pricing be aborted due to integral objective function? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] pricing/masterpricer/abortpricingint = TRUE # gap between dual bound and RMP objective at which pricing is aborted # [type: real, advanced: TRUE, range: [0,1], default: 0] pricing/masterpricer/abortpricinggap = 0 # should additional informations concerning the pricing process be displayed? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] pricing/masterpricer/dispinfos = FALSE # how many threads should be used to concurrently solve the pricing problem (0 to guess threads by OpenMP) # [type: int, advanced: FALSE, range: [0,4096], default: 0] pricing/masterpricer/threads = 0 # should stabilization be performed? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] pricing/masterpricer/stabilization = TRUE # should stabilization be performed in the tree (in nodes other than the root node)? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] pricing/masterpricer/stabilizationtree = FALSE # should the colpool be checked for negative redcost cols before solving the pricing problems? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] pricing/masterpricer/usecolpool = TRUE # should hybridization of smoothing with an ascent method be enabled? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] pricing/masterpricer/stabilization/hybridascent = FALSE # should hybridization of smoothing with an ascent method be enabled if pricing problems cannot be aggregated? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] pricing/masterpricer/stabilization/hybridascentnoagg = FALSE # should artificial variables be used to make the RMP feasible (instead of applying Farkas pricing)? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] pricing/masterpricer/useartificialvars = FALSE # use maxobj for big M objective of artificial variables # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] pricing/masterpricer/usemaxobj = TRUE # only use maxobj for big M objective of artificial variables if it is reliable # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] pricing/masterpricer/onlyreliablebigm = TRUE # factor to use for objective of unbounded variables # [type: real, advanced: FALSE, range: [0,1e+20], default: 1000] pricing/masterpricer/factorunreliable = 1000 # value for big M objective of artificial variables (negative if max obj should be used) # [type: real, advanced: FALSE, range: [0,1e+20], default: 1000] pricing/masterpricer/bigmartificial = 1000 # should the cutoffbound be applied in master LP solving (0: on, 1: off, 2: auto)? # [type: int, advanced: FALSE, range: [0,2], default: 2] pricing/masterpricer/disablecutoff = 2 # age limit for columns in column pool? 
(-1 for no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 100] pricing/masterpricer/colpool/agelimit = 100 # factor of -redcost/norm in score function # [type: real, advanced: FALSE, range: [0,10], default: 1] pricing/masterpricer/pricestore/redcostfac = 1 # factor of objective parallelism in score function # [type: real, advanced: FALSE, range: [0,10], default: 0] pricing/masterpricer/pricestore/objparalfac = 0 # factor of orthogonalities in score function # [type: real, advanced: FALSE, range: [0,10], default: 0] pricing/masterpricer/pricestore/orthofac = 0 # minimal orthogonality of columns to add # [type: real, advanced: FALSE, range: [0,1], default: 0] pricing/masterpricer/pricestore/mincolorth = 0 # choice to base efficiacy on # [type: int, advanced: FALSE, range: [0,2], default: 0] pricing/masterpricer/pricestore/efficiacychoice = 0 # should strong branching be used to determine the variables on which the branching is performed? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] branching/ryanfoster/usestrong = FALSE # minimum number of output candidates from phase 0 during strong branching # [type: int, advanced: FALSE, range: [1,2147483647], default: 10] branching/ryanfoster/minphase0outcands = 10 # maximum number of output candidates from phase 0 during strong branching # [type: int, advanced: FALSE, range: [1,2147483647], default: 50] branching/ryanfoster/maxphase0outcands = 50 # maximum number of output candidates from phase 0 as fraction of total cands during strong branching # [type: real, advanced: FALSE, range: [0,1], default: 0.7] branching/ryanfoster/maxphase0outcandsfrac = 0.7 # how much impact should the node gap have on the number of precisely evaluated candidates in phase 1 during strong branching? 
# [type: real, advanced: FALSE, range: [0,1], default: 0.25] branching/ryanfoster/phase1gapweight = 0.25 # minimum number of output candidates from phase 1 during strong branching # [type: int, advanced: FALSE, range: [1,2147483647], default: 3] branching/ryanfoster/minphase1outcands = 3 # maximum number of output candidates from phase 1 during strong branching # [type: int, advanced: FALSE, range: [1,2147483647], default: 20] branching/ryanfoster/maxphase1outcands = 20 # maximum number of output candidates from phase 1 as fraction of phase 1 cands during strong branching # [type: real, advanced: FALSE, range: [0,1], default: 0.7] branching/ryanfoster/maxphase1outcandsfrac = 0.7 # how much impact should the node gap have on the number of precisely evaluated candidates in phase 2 during strong branching? # [type: real, advanced: FALSE, range: [0,1], default: 1] branching/ryanfoster/phase2gapweight = 1 # should bounds on variables be enforced by constraints(TRUE) or by bounds(FALSE) # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] branching/orig/enforcebycons = FALSE # should pseudocosts be used to determine the variable on which the branching is performed? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] branching/orig/usepseudocosts = TRUE # should branching be performed on the most fractional variable? (only if usepseudocosts = FALSE) # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] branching/orig/mostfrac = FALSE # should the variable on which the branching is performed be selected randomly? (only if usepseudocosts = mostfrac = FALSE) # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] branching/orig/userandom = TRUE # should strong branching with propagation be used to determine the variable on which the branching is performed? 
(only if usepseudocosts = mostfrac = random = FALSE) # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] branching/orig/usepsstrong = FALSE # should strong branching be used to determine the variable on which the branching is performed? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] branching/orig/usestrong = FALSE # minimum number of output candidates from phase 0 during strong branching # [type: int, advanced: FALSE, range: [1,2147483647], default: 10] branching/orig/minphase0outcands = 10 # maximum number of output candidates from phase 0 during strong branching # [type: int, advanced: FALSE, range: [1,2147483647], default: 50] branching/orig/maxphase0outcands = 50 # maximum number of output candidates from phase 0 as fraction of total cands during strong branching # [type: real, advanced: FALSE, range: [0,1], default: 0.7] branching/orig/maxphase0outcandsfrac = 0.7 # how much impact should the node gap have on the number of precisely evaluated candidates in phase 1 during strong branching? # [type: real, advanced: FALSE, range: [0,1], default: 0.25] branching/orig/phase1gapweight = 0.25 # minimum number of output candidates from phase 1 during strong branching # [type: int, advanced: FALSE, range: [1,2147483647], default: 3] branching/orig/minphase1outcands = 3 # maximum number of output candidates from phase 1 during strong branching # [type: int, advanced: FALSE, range: [1,2147483647], default: 20] branching/orig/maxphase1outcands = 20 # maximum number of output candidates from phase 1 as fraction of phase 1 cands during strong branching # [type: real, advanced: FALSE, range: [0,1], default: 0.7] branching/orig/maxphase1outcandsfrac = 0.7 # how much impact should the node gap have on the number of precisely evaluated candidates in phase 2 during strong branching? 
# [type: real, advanced: FALSE, range: [0,1], default: 1] branching/orig/phase2gapweight = 1 # weight in score calculations for conflict score # [type: real, advanced: TRUE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 0.01] branching/relpsprob/conflictweight = 0.01 # weight in score calculations for conflict length score # [type: real, advanced: TRUE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 0.0001] branching/relpsprob/conflictlengthweight = 0.0001 # weight in score calculations for inference score # [type: real, advanced: TRUE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 0.1] branching/relpsprob/inferenceweight = 0.1 # weight in score calculations for cutoff score # [type: real, advanced: TRUE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 0.0001] branching/relpsprob/cutoffweight = 0.0001 # weight in score calculations for pseudo cost score # [type: real, advanced: TRUE, range: [-1.79769313486232e+308,1.79769313486232e+308], default: 1] branching/relpsprob/pscostweight = 1 # minimal value for minimum pseudo cost size to regard pseudo cost value as reliable # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 1] branching/relpsprob/minreliable = 1 # maximal value for minimum pseudo cost size to regard pseudo cost value as reliable # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 8] branching/relpsprob/maxreliable = 8 # maximal fraction of branching LP iterations compared to node relaxation LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.5] branching/relpsprob/iterquot = 0.5 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 100000] branching/relpsprob/iterofs = 100000 # maximal number of further variables evaluated without better score # [type: int, advanced: TRUE, range: [1,2147483647], default: 8] branching/relpsprob/maxlookahead = 8 # 
maximal number of candidates initialized with strong branching per node # [type: int, advanced: FALSE, range: [0,2147483647], default: 100] branching/relpsprob/initcand = 100 # maximal number of bound tightenings before the node is immediately reevaluated (-1: unlimited) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 20] branching/relpsprob/maxbdchgs = 20 # minimal number of bound tightenings before bound changes are applied # [type: int, advanced: TRUE, range: [1,2147483647], default: 1] branching/relpsprob/minbdchgs = 1 # shall the LP be solved during probing? (TRUE) # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] branching/relpsprob/uselp = TRUE # reliability value for probing # [type: real, advanced: FALSE, range: [0,1], default: 0.8] branching/relpsprob/reliability = 0.8 # should strong branching use column generation during variable evaluation? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] branching/bp_strong/stronglite = FALSE # should strong branching run as precise as possible (to generate more valuable training data)? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] branching/bp_strong/strongtraining = FALSE # should infeasibility detected during strong branching be handled immediately, or only if the candidate is selected? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] branching/bp_strong/immediateinf = TRUE # how many times can bounds be changed due to infeasibility during strong branching until an already evaluated variable needs to be reevaluated? 
# [type: int, advanced: FALSE, range: [0,2147483647], default: 1] branching/bp_strong/reevalage = 1 # minimum number of variables for phase 2 to be executed, otherwise the best candidate from phase 1 will be chosen # [type: int, advanced: FALSE, range: [0,2147483647], default: 4] branching/bp_strong/mincolgencands = 4 # how many candidates should be chosen based on historical strong branching scores as opposed to current heuristic scores in phase 0 (e.g. 0.5 = 50%)? # [type: real, advanced: FALSE, range: [0,1], default: 0.5] branching/bp_strong/histweight = 0.5 # maximum number of strong branching lp iterations, set to 2*avg lp iterations if <= 0 # [type: longint, advanced: FALSE, range: [0,2147483647], default: 2147483647] branching/bp_strong/maxsblpiters = 2147483647 # maximum number of strong branching price rounds, set to 2*avg lp iterations if <= 0 # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] branching/bp_strong/maxsbpricerounds = 2147483647 # maximum number of non-improving candidates until phase 2 is stopped # [type: int, advanced: FALSE, range: [0,2147483647], default: 8] branching/bp_strong/maxlookahead = 8 # how much should the lookahead scale with the overall evaluation effort? 
(0 = not at all, 1 = fully) # [type: real, advanced: FALSE, range: [0,1], default: 0.5] branching/bp_strong/lookaheadscales = 0.5 # minimum tree depth from which on phase 0 is performed (intended for heuristics like pseudocost branching) # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] branching/bp_strong/minphase0depth = 0 # maximum tree depth up to which phase 1 is performed (intended for heuristics like pseudocost branching) # [type: int, advanced: FALSE, range: [0,2147483647], default: 4] branching/bp_strong/maxphase1depth = 4 # maximum tree depth up to which phase 2 is performed (intended for heuristics like pseudocost branching) # [type: int, advanced: FALSE, range: [0,2147483647], default: 3] branching/bp_strong/maxphase2depth = 3 # how much should the logarithm of the number of variables influence the depth for hybrid branching? (0 = not at all, 1 = fully) # [type: real, advanced: FALSE, range: [0,1], default: 0.5] branching/bp_strong/depthlogweight = 0.5 # what should be the base of the logarithm that is used to compute the depth of hybrid branching? # [type: real, advanced: FALSE, range: [0,2147483647], default: 3.5] branching/bp_strong/depthlogbase = 3.5 # if using a logarithm to compute the depth of hybrid branching, what should be the fraction of the depth assigned to phase 1 that is assigned to phase 0? # [type: real, advanced: FALSE, range: [0,1], default: 0] branching/bp_strong/depthlogphase0frac = 0 # if using a logarithm to compute the depth of hybrid branching, what should be the fraction of the depth assigned to phase 1 that is assigned to phase 2? # [type: real, advanced: FALSE, range: [0,1], default: 0.75] branching/bp_strong/depthlogphase2frac = 0.75 # what percentage of the strong branching score of the candidate that was selected does the heuristic's incumbent need to be considered close (e.g. 0.5 = 50%)? 
# [type: real, advanced: FALSE, range: [0,1], default: 0.9] branching/bp_strong/closepercentage = 0.9 # how many times in a row can the heuristic be close before strong branching is stopped? # [type: int, advanced: FALSE, range: [-1,2147483647], default: 4] branching/bp_strong/maxconsecheurclose = 4 # with how much weight should strong branching scores be considered for pseudocost scores? # [type: real, advanced: FALSE, range: [0,1], default: 1] branching/bp_strong/sbpseudocostweight = 1 # min count of pseudocost scores for a variable to be considered reliable in phase 1 # [type: int, advanced: FALSE, range: [-1,2147483647], default: 2147483647] branching/bp_strong/phase1reliable = 2147483647 # min count of pseudocost scores for a variable to be considered reliable in phase 2 # [type: int, advanced: FALSE, range: [-1,2147483647], default: 2147483647] branching/bp_strong/phase2reliable = 2147483647 # should phase 0 be performed even if the number of input candidates is already lower or equal to the number of output candidates? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] branching/bp_strong/forcep0 = FALSE # should single-variable-pseudocosts be used as a heuristic for strong branching for Ryan-Foster branching? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] branching/bp_strong/ryanfoster/usepseudocosts = TRUE # should single-variable-fractionality be used as a heuristic for strong branching for Ryan-Foster branching? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] branching/bp_strong/ryanfoster/usemostfrac = FALSE # enable master separator # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] sepa/master/enable = TRUE # parameter returns which parameter setting is used for separation (default = 0, aggressive = 1, fast = 2) # [type: int, advanced: FALSE, range: [0,2], default: 1] sepa/master/paramsetting = 1 # is basis separator enabled? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] sepa/basis/enable = TRUE # is objective constraint of separator enabled? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] sepa/basis/enableobj = FALSE # round obj rhs/lhs of obj constraint if obj is int? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] sepa/basis/enableobjround = FALSE # add cuts generated during pricing to newconss array? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] sepa/basis/enableppcuts = FALSE # is objective constraint for redcost of each pp of separator enabled? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] sepa/basis/enableppobjconss = FALSE # is objective constraint for redcost of each pp during pricing of separator enabled? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] sepa/basis/enableppobjcg = FALSE # generated obj convex dynamically # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] sepa/basis/genobjconvex = FALSE # should positive slack influence the probing objective function? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] sepa/basis/enableposslack = FALSE # exponent of positive slack usage # [type: int, advanced: FALSE, range: [1,2147483647], default: 1] sepa/basis/posslackexp = 1 # automatically generated exponent? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] sepa/basis/posslackexpgen = FALSE # factor for automatically generated exponent # [type: real, advanced: FALSE, range: [1e-09,1e+20], default: 0.1] sepa/basis/posslackexpgenfactor = 0.1 # convex combination factor (= 0.0, use original objective; = 1.0, use face objective) # [type: real, advanced: FALSE, range: [0,1], default: 0] sepa/basis/objconvex = 0 # parameter returns which parameter setting is used for separation (default = 0, aggressive = 1, fast = 2) # [type: int, advanced: FALSE, range: [0,2], default: 0] sepa/basis/paramsetting = 0 # parameter returns if basis is searched with different objective # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] sepa/basis/chgobj = TRUE # parameter returns maximum number of separation rounds in probing LP (-1 if unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] sepa/basis/maxrounds = -1 # parameter returns maximum number of separation rounds in probing LP in root node (-1 if unlimited) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] sepa/basis/maxroundsroot = -1 # parameter returns number of minimum cuts needed to return *result = SCIP_Separated # [type: int, advanced: FALSE, range: [1,2147483647], default: 50] sepa/basis/mincuts = 50 # parameter returns if obj is changed not only in the first round # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] sepa/basis/chgobjallways = FALSE # parameter returns if cuts are forced to enter the LP # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] sepa/basis/forcecuts = FALSE # flag to indicate whether heuristic solving method of solver <knapsack> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] pricingsolver/knapsack/heurenabled = FALSE # flag to indicate whether exact solving method of solver <knapsack> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] 
pricingsolver/knapsack/exactenabled = TRUE # priority of solver <knapsack> # [type: int, advanced: FALSE, range: [-536870912,536870911], default: 200] pricingsolver/knapsack/priority = 200 # flag to indicate whether heuristic solving method of solver <mip> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] pricingsolver/mip/heurenabled = TRUE # flag to indicate whether exact solving method of solver <mip> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] pricingsolver/mip/exactenabled = TRUE # priority of solver <mip> # [type: int, advanced: FALSE, range: [-536870912,536870911], default: 0] pricingsolver/mip/priority = 0 # should solutions of the pricing MIPs be checked for duplicity? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] pricingsolver/mip/checksols = TRUE # start node limit for heuristic pricing # [type: longint, advanced: TRUE, range: [-1,9223372036854775807], default: 1000] pricingsolver/mip/startnodelimit = 1000 # start stalling node limit for heuristic pricing # [type: longint, advanced: TRUE, range: [-1,9223372036854775807], default: 100] pricingsolver/mip/startstallnodelimit = 100 # start gap limit for heuristic pricing # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0.2] pricingsolver/mip/startgaplimit = 0.2 # start solution limit for heuristic pricing # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10] pricingsolver/mip/startsollimit = 10 # factor by which to increase node limit for heuristic pricing (1.0: add start limit) # [type: real, advanced: TRUE, range: [1,1e+20], default: 1] pricingsolver/mip/nodelimitfac = 1 # factor by which to increase stalling node limit for heuristic pricing (1.0: add start limit) # [type: real, advanced: TRUE, range: [1,1e+20], default: 1] pricingsolver/mip/stallnodelimitfac = 1 # factor by which to decrease gap limit for heuristic pricing (1.0: subtract start limit) # [type: real, advanced: TRUE, 
range: [0,1], default: 0.8] pricingsolver/mip/gaplimitfac = 0.8 # factor by which to increase solution limit for heuristic pricing (1.0: add start limit) # [type: real, advanced: TRUE, range: [1,1e+20], default: 1] pricingsolver/mip/sollimitfac = 1 # settings file for pricing problems # [type: string, advanced: TRUE, default: "-"] pricingsolver/mip/settingsfile = "-" # should propagated bound changes in the original be enforced in the master (only proper vars)? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] relaxing/gcg/enforceproper = TRUE # filename to write all bounds to # [type: string, advanced: FALSE, default: ""] eventhdlr/solvingstats/filename = "" # should discretization (TRUE) or convexification (FALSE) approach be used? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] relaxing/gcg/discretization = TRUE # should discretization (TRUE) or convexification (FALSE) approach be used in mixed-integer programs? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] relaxing/gcg/mipdiscretization = TRUE # should identical blocks be aggregated (only for discretization approach)? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] relaxing/gcg/aggregation = TRUE # should additional information about the blocks be displayed? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] relaxing/gcg/dispinfos = FALSE # the decomposition mode that GCG will use. (0: Dantzig-Wolfe (default), 1: Benders' decomposition, 2: no decomposition will be performed) # [type: int, advanced: FALSE, range: [0,2], default: 0] relaxing/gcg/mode = 0 # should bliss be used to check for identical blocks? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] relaxing/gcg/bliss/enabled = TRUE # bliss search node limit (0: unlimited), requires patched bliss version # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] relaxing/gcg/bliss/searchnodelimit = 0 # bliss generator limit (0: unlimited), requires patched bliss version # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] relaxing/gcg/bliss/generatorlimit = 0 # priority of branching rule <empty> # [type: int, advanced: FALSE, range: [-536870912,536870911], default: 1000000] branching/empty/priority = 1000000 # maximal depth level, up to which branching rule <empty> should be used (-1 for no limit) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] branching/empty/maxdepth = -1 # maximal relative distance from current node's dual bound to primal bound compared to best node's dual bound for applying branching rule (0.0: only on current best node, 1.0: on all nodes) # [type: real, advanced: FALSE, range: [0,1], default: 1] branching/empty/maxbounddist = 1 # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] constraints/origbranch/sepafreq = -1 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] constraints/origbranch/propfreq = -1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/origbranch/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: 100] constraints/origbranch/eagerfreq = 100 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: 
[-1,2147483647], default: 0] constraints/origbranch/maxprerounds = 0 # should separation method be delayed, if other separators found cuts? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/origbranch/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/origbranch/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 28] constraints/origbranch/presoltiming = 28 # should the transformed (and possibly presolved) problem be used or the original one # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] reading/clsreader/usetransform = TRUE # frequency for separating cuts (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] constraints/decomp/sepafreq = -1 # frequency for propagating domains (-1: never, 0: only in root node) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] constraints/decomp/propfreq = -1 # timing when constraint propagation should be called (1:BEFORELP, 2:DURINGLPLOOP, 4:AFTERLPLOOP, 15:ALWAYS) # [type: int, advanced: TRUE, range: [1,15], default: 1] constraints/decomp/proptiming = 1 # frequency for using all instead of only the useful constraints in separation, propagation and enforcement (-1: never, 0: only in first evaluation) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] constraints/decomp/eagerfreq = -1 # maximal number of presolving rounds the constraint handler participates in (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 0] constraints/decomp/maxprerounds = 0 # should separation method be delayed, if other separators found cuts? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/decomp/delaysepa = FALSE # should propagation method be delayed, if other propagators found reductions? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] constraints/decomp/delayprop = FALSE # timing mask of the constraint handler's presolving method (4:FAST, 8:MEDIUM, 16:EXHAUSTIVE, 32:FINAL) # [type: int, advanced: TRUE, range: [4,60], default: 28] constraints/decomp/presoltiming = 28 # Enables detection # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/enabled = TRUE # Enables postprocessing of complete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/postprocess = TRUE # Maximum number of detection loop rounds # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/maxrounds = 1 # Maximum detection time in seconds # [type: int, advanced: FALSE, range: [0,2147483647], default: 600] detection/maxtime = 600 # Enables detection for the original problem # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/origprob/enabled = TRUE # Weighting method when comparing decompositions for presolved and orig problem # [type: int, advanced: TRUE, range: [0,3], default: 0] detection/origprob/weightinggpresolvedoriginaldecomps = 0 # Limits the number of constraints of a block (aggregation information for block is not calculated when exceeded) # [type: int, advanced: FALSE, range: [0,2147483647], default: 300] detection/aggregation/limitnconssperblock = 300 # Limits the number of variables of a block (aggregation information for block is not calculated when exceeded) # [type: int, advanced: FALSE, range: [0,2147483647], default: 300] detection/aggregation/limitnvarsperblock = 300 # If enabled only decompositions with only continuous variables in the subproblems are searched # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] 
detection/benders/onlycontsubpr = FALSE # If enabled only decomposition with only binary variables in the master are searched # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/benders/onlybinmaster = FALSE # Enables benders detection # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/benders/enabled = FALSE # Enables classification # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/classification/enabled = TRUE # If enabled partition duplicates are allowed (for statistical reasons) # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/classification/allowduplicates = FALSE # Enables classification for the original problem # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/origprob/classificationenabled = TRUE # Maximum number of classes per partition # [type: int, advanced: FALSE, range: [0,2147483647], default: 9] detection/classification/maxnclassesperpartition = 9 # Maximum number of classes per partition for large problems (nconss + nvars >= 50000) # [type: int, advanced: FALSE, range: [0,2147483647], default: 5] detection/classification/maxnclassesperpartitionforlargeprobs = 5 # Maximum number of classes a partition can use for voting nblockcandidates # [type: int, advanced: FALSE, range: [0,2147483647], default: 18] detection/blocknrcandidates/maxnclasses = 18 # Enables the use of medianvarspercons calculation for block number candidates calculation # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/blocknrcandidates/medianvarspercons = FALSE # Score calculation for comparing (partial) decompositions (0: max white, 1: border area, 2: classic, 3: max foreseeing white, 4: ppc-max-white, 5: max foreseeing white with aggregation info, 6: ppc-max-white with aggregation info, 7: experimental benders score, 8: strong decomposition score) # [type: int, advanced: FALSE, range: [0,8], default: 4] 
detection/score/scoretype = 4 # Timelimit for strong decompositions score calculation per partialdec in seconds # [type: real, advanced: FALSE, range: [0,2147483647], default: 30] detection/score/strong_detection/timelimit = 30 # Method for random dual values use for strong decomposition: 1: naive, 2: expected equality exponential distributed, 3: expected overestimation exponential distributed # [type: int, advanced: FALSE, range: [1,3], default: 1] detection/score/strong_detection/dualvalrandommethod = 1 # Convex coefficient for orig dual val, i.e. (1-this coef) is factor for random dual value # [type: real, advanced: FALSE, range: [0,1], default: 0.5] detection/score/strong_detection/coeffactororigvsrandom = 0.5 # flag to indicate whether detector <constype> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/constype/enabled = FALSE # flag to indicate whether detector <constype> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/constype/finishingenabled = FALSE # flag to indicate whether detector <constype> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/constype/postprocessingenabled = FALSE # flag to indicate whether detector <constype> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/constype/skip = FALSE # flag to indicate whether detector <constype> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/constype/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <constype> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] 
detection/detectors/constype/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <constype> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/constype/freqcallround = 1 # maximum round the detector gets called in detection loop <constype> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/constype/maxcallround = 0 # minimum round the detector gets called in detection loop <constype> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/constype/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <constype> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/constype/origfreqcallround = 1 # maximum round the detector gets called in detection loop <constype> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/constype/origmaxcallround = 0 # minimum round the detector gets called in detection loop <constype> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/constype/origmincallround = 0 # priority of detector <constype> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/constype/priority = 0 # flag to indicate whether detector <postprocess> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/postprocess/enabled = FALSE # flag to indicate whether detector <postprocess> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/postprocess/finishingenabled = FALSE # flag to indicate whether detector 
<postprocess> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/postprocess/postprocessingenabled = TRUE # flag to indicate whether detector <postprocess> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/postprocess/skip = FALSE # flag to indicate whether detector <postprocess> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/postprocess/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <postprocess> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/postprocess/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <postprocess> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/postprocess/freqcallround = 1 # maximum round the detector gets called in detection loop <postprocess> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/postprocess/maxcallround = 2147483647 # minimum round the detector gets called in detection loop <postprocess> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/postprocess/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <postprocess> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/postprocess/origfreqcallround = 1 # maximum round the detector gets called in detection loop <postprocess> # [type: int, advanced: 
FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/postprocess/origmaxcallround = 2147483647 # minimum round the detector gets called in detection loop <postprocess> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/postprocess/origmincallround = 0 # priority of detector <postprocess> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 1000000] detection/detectors/postprocess/priority = 1000000 # should the constraint adjacency be used # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/postprocess/useconssadj = TRUE # flag to indicate whether detector <consclass> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/consclass/enabled = TRUE # flag to indicate whether detector <consclass> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/consclass/finishingenabled = FALSE # flag to indicate whether detector <consclass> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/consclass/postprocessingenabled = FALSE # flag to indicate whether detector <consclass> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/consclass/skip = FALSE # flag to indicate whether detector <consclass> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/consclass/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <consclass> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/consclass/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it 
is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <consclass> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/consclass/freqcallround = 1 # maximum round the detector gets called in detection loop <consclass> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/consclass/maxcallround = 0 # minimum round the detector gets called in detection loop <consclass> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/consclass/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <consclass> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/consclass/origfreqcallround = 1 # maximum round the detector gets called in detection loop <consclass> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/consclass/origmaxcallround = 2147483647 # minimum round the detector gets called in detection loop <consclass> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/consclass/origmincallround = 0 # priority of detector <consclass> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/consclass/priority = 0 # maximum number of classes # [type: int, advanced: FALSE, range: [1,2147483647], default: 5] detection/detectors/consclass/maxnclasses = 5 # flag to indicate whether detector <densemasterconss> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/densemasterconss/enabled = TRUE # flag to indicate whether detector <densemasterconss> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] 
detection/detectors/densemasterconss/finishingenabled = FALSE # flag to indicate whether detector <densemasterconss> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/densemasterconss/postprocessingenabled = FALSE # flag to indicate whether detector <densemasterconss> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/densemasterconss/skip = FALSE # flag to indicate whether detector <densemasterconss> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/densemasterconss/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <densemasterconss> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/densemasterconss/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <densemasterconss> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/densemasterconss/freqcallround = 1 # maximum round the detector gets called in detection loop <densemasterconss> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/densemasterconss/maxcallround = 0 # minimum round the detector gets called in detection loop <densemasterconss> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/densemasterconss/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <densemasterconss> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] 
detection/detectors/densemasterconss/origfreqcallround = 1 # maximum round the detector gets called in detection loop <densemasterconss> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/densemasterconss/origmaxcallround = 2147483647 # minimum round the detector gets called in detection loop <densemasterconss> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/densemasterconss/origmincallround = 0 # priority of detector <densemasterconss> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/densemasterconss/priority = 0 # flag to indicate whether detector <neighborhoodmaster> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/neighborhoodmaster/enabled = TRUE # flag to indicate whether detector <neighborhoodmaster> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/neighborhoodmaster/finishingenabled = FALSE # flag to indicate whether detector <neighborhoodmaster> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/neighborhoodmaster/postprocessingenabled = FALSE # flag to indicate whether detector <neighborhoodmaster> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/neighborhoodmaster/skip = FALSE # flag to indicate whether detector <neighborhoodmaster> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/neighborhoodmaster/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <neighborhoodmaster> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] 
detection/detectors/neighborhoodmaster/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <neighborhoodmaster> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/neighborhoodmaster/freqcallround = 1 # maximum round the detector gets called in detection loop <neighborhoodmaster> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/neighborhoodmaster/maxcallround = 0 # minimum round the detector gets called in detection loop <neighborhoodmaster> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/neighborhoodmaster/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <neighborhoodmaster> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/neighborhoodmaster/origfreqcallround = 1 # maximum round the detector gets called in detection loop <neighborhoodmaster> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/neighborhoodmaster/origmaxcallround = 2147483647 # minimum round the detector gets called in detection loop <neighborhoodmaster> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/neighborhoodmaster/origmincallround = 0 # priority of detector <neighborhoodmaster> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/neighborhoodmaster/priority = 0 # the maximal ratio of open constraints that are assigned to the master problem # [type: real, advanced: FALSE, range: [0,1], default: 0.2] detection/detectors/neighborhoodmaster/maxratio = 0.2 # flag to indicate whether detector <stairheur> is enabled # [type: bool, advanced: 
FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/stairheur/enabled = FALSE # flag to indicate whether detector <stairheur> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/stairheur/finishingenabled = FALSE # flag to indicate whether detector <stairheur> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/stairheur/postprocessingenabled = FALSE # flag to indicate whether detector <stairheur> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/stairheur/skip = FALSE # flag to indicate whether detector <stairheur> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/stairheur/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <stairheur> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/stairheur/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <stairheur> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/stairheur/freqcallround = 1 # maximum round the detector gets called in detection loop <stairheur> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/stairheur/maxcallround = 2147483647 # minimum round the detector gets called in detection loop <stairheur> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/stairheur/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if 
and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <stairheur> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/stairheur/origfreqcallround = 1 # maximum round the detector gets called in detection loop <stairheur> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/stairheur/origmaxcallround = 2147483647 # minimum round the detector gets called in detection loop <stairheur> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/stairheur/origmincallround = 0 # priority of detector <stairheur> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 1200] detection/detectors/stairheur/priority = 1200 # The number of constraints per block (static blocking only) # [type: int, advanced: FALSE, range: [2,1000000], default: 32] detection/detectors/stairheur/nconssperblock = 32 # The maximal number of blocks # [type: int, advanced: FALSE, range: [2,1000000], default: 20] detection/detectors/stairheur/maxblocks = 20 # The minimal number of blocks # [type: int, advanced: FALSE, range: [2,1000000], default: 2] detection/detectors/stairheur/minblocks = 2 # The desired number of blocks. 0 means automatic determination of the number of blocks. # [type: int, advanced: FALSE, range: [0,1000000], default: 0] detection/detectors/stairheur/desiredblocks = 0 # Enable blocking type 'dynamic' # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/stairheur/dynamicblocking = FALSE # Enable blocking type 'static' # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/stairheur/staticblocking = TRUE # Enable blocking type 'as soon as possible' # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/stairheur/blockingassoonaspossible = FALSE # Enables multiple decompositions for all enabled blocking types. 
Ranging from minblocks to maxblocks # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/stairheur/multipledecomps = TRUE # The maximum number of iterations of the ROC-algorithm. -1 for no limit # [type: int, advanced: FALSE, range: [-1,1000000], default: 1000000] detection/detectors/stairheur/maxiterationsROC = 1000000 # flag to indicate whether detector <staircase_lsp> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/staircase_lsp/enabled = FALSE # flag to indicate whether detector <staircase_lsp> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/staircase_lsp/finishingenabled = FALSE # flag to indicate whether detector <staircase_lsp> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/staircase_lsp/postprocessingenabled = FALSE # flag to indicate whether detector <staircase_lsp> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/staircase_lsp/skip = FALSE # flag to indicate whether detector <staircase_lsp> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/staircase_lsp/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <staircase_lsp> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/staircase_lsp/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <staircase_lsp> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] 
detection/detectors/staircase_lsp/freqcallround = 1 # maximum round the detector gets called in detection loop <staircase_lsp> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/staircase_lsp/maxcallround = 2147483647 # minimum round the detector gets called in detection loop <staircase_lsp> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/staircase_lsp/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <staircase_lsp> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/staircase_lsp/origfreqcallround = 1 # maximum round the detector gets called in detection loop <staircase_lsp> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/staircase_lsp/origmaxcallround = 2147483647 # minimum round the detector gets called in detection loop <staircase_lsp> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/staircase_lsp/origmincallround = 0 # priority of detector <staircase_lsp> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 200] detection/detectors/staircase_lsp/priority = 200 # flag to indicate whether detector <compgreedily> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/compgreedily/enabled = FALSE # flag to indicate whether detector <compgreedily> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/compgreedily/finishingenabled = FALSE # flag to indicate whether detector <compgreedily> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/compgreedily/postprocessingenabled = FALSE 
# flag to indicate whether detector <compgreedily> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/compgreedily/skip = FALSE # flag to indicate whether detector <compgreedily> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/compgreedily/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <compgreedily> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/compgreedily/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <compgreedily> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/compgreedily/freqcallround = 1 # maximum round the detector gets called in detection loop <compgreedily> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/compgreedily/maxcallround = 2147483647 # minimum round the detector gets called in detection loop <compgreedily> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/compgreedily/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <compgreedily> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/compgreedily/origfreqcallround = 1 # maximum round the detector gets called in detection loop <compgreedily> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/compgreedily/origmaxcallround = 2147483647 # minimum round the detector gets called in detection loop <compgreedily> # 
[type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/compgreedily/origmincallround = 0 # priority of detector <compgreedily> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/compgreedily/priority = 0 # flag to indicate whether detector <mastersetcover> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/mastersetcover/enabled = TRUE # flag to indicate whether detector <mastersetcover> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetcover/finishingenabled = FALSE # flag to indicate whether detector <mastersetcover> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetcover/postprocessingenabled = FALSE # flag to indicate whether detector <mastersetcover> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetcover/skip = FALSE # flag to indicate whether detector <mastersetcover> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetcover/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <mastersetcover> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetcover/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <mastersetcover> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/mastersetcover/freqcallround = 1 # maximum round the detector gets called in 
detection loop <mastersetcover> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/mastersetcover/maxcallround = 2147483647 # minimum round the detector gets called in detection loop <mastersetcover> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/mastersetcover/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <mastersetcover> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/mastersetcover/origfreqcallround = 1 # maximum round the detector gets called in detection loop <mastersetcover> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/mastersetcover/origmaxcallround = 2147483647 # minimum round the detector gets called in detection loop <mastersetcover> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/mastersetcover/origmincallround = 0 # priority of detector <mastersetcover> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/mastersetcover/priority = 0 # flag to indicate whether detector <mastersetpack> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/mastersetpack/enabled = TRUE # flag to indicate whether detector <mastersetpack> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetpack/finishingenabled = FALSE # flag to indicate whether detector <mastersetpack> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetpack/postprocessingenabled = FALSE # flag to indicate whether detector <mastersetpack> should be skipped if others 
found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetpack/skip = FALSE # flag to indicate whether detector <mastersetpack> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetpack/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <mastersetpack> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetpack/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <mastersetpack> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/mastersetpack/freqcallround = 1 # maximum round the detector gets called in detection loop <mastersetpack> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/mastersetpack/maxcallround = 2147483647 # minimum round the detector gets called in detection loop <mastersetpack> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/mastersetpack/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <mastersetpack> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/mastersetpack/origfreqcallround = 1 # maximum round the detector gets called in detection loop <mastersetpack> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/mastersetpack/origmaxcallround = 2147483647 # minimum round the detector gets called in detection loop <mastersetpack> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] 
detection/detectors/mastersetpack/origmincallround = 0 # priority of detector <mastersetpack> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/mastersetpack/priority = 0 # flag to indicate whether detector <mastersetpart> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/mastersetpart/enabled = TRUE # flag to indicate whether detector <mastersetpart> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetpart/finishingenabled = FALSE # flag to indicate whether detector <mastersetpart> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetpart/postprocessingenabled = FALSE # flag to indicate whether detector <mastersetpart> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetpart/skip = FALSE # flag to indicate whether detector <mastersetpart> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetpart/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <mastersetpart> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/mastersetpart/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <mastersetpart> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/mastersetpart/freqcallround = 1 # maximum round the detector gets called in detection loop <mastersetpart> # [type: int, advanced: FALSE, range: 
[0,2147483647], default: 2147483647] detection/detectors/mastersetpart/maxcallround = 2147483647 # minimum round the detector gets called in detection loop <mastersetpart> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/mastersetpart/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <mastersetpart> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/mastersetpart/origfreqcallround = 1 # maximum round the detector gets called in detection loop <mastersetpart> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/mastersetpart/origmaxcallround = 2147483647 # minimum round the detector gets called in detection loop <mastersetpart> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/mastersetpart/origmincallround = 0 # priority of detector <mastersetpart> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/mastersetpart/priority = 0 # flag to indicate whether detector <hcgpartition> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hcgpartition/enabled = FALSE # flag to indicate whether detector <hcgpartition> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hcgpartition/finishingenabled = FALSE # flag to indicate whether detector <hcgpartition> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hcgpartition/postprocessingenabled = FALSE # flag to indicate whether detector <hcgpartition> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] 
detection/detectors/hcgpartition/skip = FALSE # flag to indicate whether detector <hcgpartition> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/hcgpartition/usefullrecall = TRUE # flag to indicate whether emphasis settings for detector <hcgpartition> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hcgpartition/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <hcgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/hcgpartition/freqcallround = 1 # maximum round the detector gets called in detection loop <hcgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/hcgpartition/maxcallround = 0 # minimum round the detector gets called in detection loop <hcgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/hcgpartition/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <hcgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/hcgpartition/origfreqcallround = 1 # maximum round the detector gets called in detection loop <hcgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/hcgpartition/origmaxcallround = 0 # minimum round the detector gets called in detection loop <hcgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/hcgpartition/origmincallround = 0 # priority of detector <hcgpartition> # [type: int, advanced: FALSE, range: 
[-2147483648,2147483647], default: 1000] detection/detectors/hcgpartition/priority = 1000 # The maximal number of block number candidates # [type: int, advanced: FALSE, range: [0,1000000], default: 1] detection/detectors/hcgpartition/maxnblockcandidates = 1 # The maximal number of blocks (detector is called for all block numbers in [minblocks,maxblocks]) # [type: int, advanced: FALSE, range: [2,1000000], default: 20] detection/detectors/hcgpartition/maxblocks = 20 # The minimal number of blocks (detector is called for all block numbers in [minblocks,maxblocks]) # [type: int, advanced: FALSE, range: [2,1000000], default: 2] detection/detectors/hcgpartition/minblocks = 2 # Factor on how heavy equality (beta) and inequality constraints are measured # [type: real, advanced: FALSE, range: [0,1], default: 0.5] detection/detectors/hcgpartition/beta = 0.5 # Factor on how heavy the standard deviation of the coefficients is measured # [type: real, advanced: FALSE, range: [0,1e+20], default: 0] detection/detectors/hcgpartition/alpha = 0 # Weight of a variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 1] detection/detectors/hcgpartition/varWeight = 1 # Weight of a binary variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 2] detection/detectors/hcgpartition/varWeightBinary = 2 # Weight of a continuous variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 1] detection/detectors/hcgpartition/varWeightContinous = 1 # Weight of an implicit integer variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 2] detection/detectors/hcgpartition/varWeightImplint = 2 # Weight of an integer variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 2] detection/detectors/hcgpartition/varWeightInteger = 2 # Weight of a constraint hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 5] detection/detectors/hcgpartition/consWeight = 5 # Whether to clean up 
temporary files # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/hcgpartition/tidy = TRUE # Random seed for hmetis # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1] detection/detectors/hcgpartition/randomseed = 1 # Percentage of dummy nodes for metis # [type: real, advanced: FALSE, range: [0,1], default: 0.2] detection/detectors/hcgpartition/dummynodes = 0.2 # Weight for constraint hyperedges that are setpartitioning or covering constraints # [type: int, advanced: FALSE, range: [0,1000000], default: 5] detection/detectors/hcgpartition/consWeightSetppc = 5 # Unbalance factor for metis # [type: real, advanced: FALSE, range: [0,1e+20], default: 5] detection/detectors/hcgpartition/ubfactor = 5 # Should the metis output be displayed # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hcgpartition/metisverbose = FALSE # Should the rb or kway method be used for partitioning by metis # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/hcgpartition/metisuseptyperb = TRUE # Should the problem be used for metis files or a temporary name # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hcgpartition/realname = FALSE # flag to indicate whether detector <hrgpartition> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hrgpartition/enabled = FALSE # flag to indicate whether detector <hrgpartition> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hrgpartition/finishingenabled = FALSE # flag to indicate whether detector <hrgpartition> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hrgpartition/postprocessingenabled = FALSE # flag to indicate whether detector <hrgpartition> should 
be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hrgpartition/skip = FALSE # flag to indicate whether detector <hrgpartition> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/hrgpartition/usefullrecall = TRUE # flag to indicate whether emphasis settings for detector <hrgpartition> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hrgpartition/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <hrgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/hrgpartition/freqcallround = 1 # maximum round the detector gets called in detection loop <hrgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/hrgpartition/maxcallround = 0 # minimum round the detector gets called in detection loop <hrgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/hrgpartition/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <hrgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/hrgpartition/origfreqcallround = 1 # maximum round the detector gets called in detection loop <hrgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/hrgpartition/origmaxcallround = 0 # minimum round the detector gets called in detection loop <hrgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] 
detection/detectors/hrgpartition/origmincallround = 0 # priority of detector <hrgpartition> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 1000] detection/detectors/hrgpartition/priority = 1000 # Limit for sum of nvars and nconss for enabling this detector in default # [type: int, advanced: TRUE, range: [0,2147483647], default: 10000] detection/detectors/hrgpartition/limitnconssnvarsdefault = 10000 # Should this detector be enabled even the limit nconssnvars is exceeded # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hrgpartition/enabledforlargeproblems = FALSE # The maximal number of block number candidates # [type: int, advanced: FALSE, range: [0,1000000], default: 3] detection/detectors/hrgpartition/maxnblockcandidates = 3 # The maximal number of blocks (detector is called for all block numbers in [minblocks,maxblocks]) # [type: int, advanced: FALSE, range: [2,1000000], default: 20] detection/detectors/hrgpartition/maxblocks = 20 # The minimal number of blocks (detector is called for all block numbers in [minblocks,maxblocks]) # [type: int, advanced: FALSE, range: [2,1000000], default: 2] detection/detectors/hrgpartition/minblocks = 2 # Factor on how heavy equality (beta) and inequality constraints are measured # [type: real, advanced: FALSE, range: [0,1], default: 0.5] detection/detectors/hrgpartition/beta = 0.5 # Factor on how heavy the standard deviation of the coefficients is measured # [type: real, advanced: FALSE, range: [0,1e+20], default: 0] detection/detectors/hrgpartition/alpha = 0 # Weight of a variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 1] detection/detectors/hrgpartition/varWeight = 1 # Weight of a binary variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 2] detection/detectors/hrgpartition/varWeightBinary = 2 # Weight of a continuos variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 1] 
detection/detectors/hrgpartition/varWeightContinous = 1 # Weight of an implicit integer variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 2] detection/detectors/hrgpartition/varWeightImplint = 2 # Weight of an integer variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 2] detection/detectors/hrgpartition/varWeightInteger = 2 # Weight of a constraint hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 5] detection/detectors/hrgpartition/consWeight = 5 # Whether to clean up temporary files # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/hrgpartition/tidy = TRUE # Random seed for hmetis # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1] detection/detectors/hrgpartition/randomseed = 1 # Percentage of dummy nodes for metis # [type: real, advanced: FALSE, range: [0,1], default: 0.2] detection/detectors/hrgpartition/dummynodes = 0.2 # Weight for constraint hyperedges that are setpartitioning or covering constraints # [type: int, advanced: FALSE, range: [0,1000000], default: 5] detection/detectors/hrgpartition/consWeightSetppc = 5 # Unbalance factor for metis # [type: real, advanced: FALSE, range: [0,1e+20], default: 5] detection/detectors/hrgpartition/ubfactor = 5 # Should the metis output be displayed # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hrgpartition/metisverbose = FALSE # Should the rb or kway method be used for partitioning by metis # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/hrgpartition/metisuseptyperb = TRUE # Should the problem be used for metis files or a temporary name # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hrgpartition/realname = FALSE # flag to indicate whether detector <hrcgpartition> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] 
detection/detectors/hrcgpartition/enabled = FALSE # flag to indicate whether detector <hrcgpartition> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hrcgpartition/finishingenabled = FALSE # flag to indicate whether detector <hrcgpartition> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hrcgpartition/postprocessingenabled = FALSE # flag to indicate whether detector <hrcgpartition> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hrcgpartition/skip = FALSE # flag to indicate whether detector <hrcgpartition> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/hrcgpartition/usefullrecall = TRUE # flag to indicate whether emphasis settings for detector <hrcgpartition> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hrcgpartition/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <hrcgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/hrcgpartition/freqcallround = 1 # maximum round the detector gets called in detection loop <hrcgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/hrcgpartition/maxcallround = 1 # minimum round the detector gets called in detection loop <hrcgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/hrcgpartition/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r 
if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <hrcgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/hrcgpartition/origfreqcallround = 1 # maximum round the detector gets called in detection loop <hrcgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/hrcgpartition/origmaxcallround = 1 # minimum round the detector gets called in detection loop <hrcgpartition> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/hrcgpartition/origmincallround = 0 # priority of detector <hrcgpartition> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 1000] detection/detectors/hrcgpartition/priority = 1000 # The maximal number of block number candidates # [type: int, advanced: FALSE, range: [0,1000000], default: 3] detection/detectors/hrcgpartition/maxnblockcandidates = 3 # The maximal number of blocks (detector is called for all block numbers in [minblocks,maxblocks]) # [type: int, advanced: FALSE, range: [2,1000000], default: 20] detection/detectors/hrcgpartition/maxblocks = 20 # The minimal number of blocks (detector is called for all block numbers in [minblocks,maxblocks]) # [type: int, advanced: FALSE, range: [2,1000000], default: 2] detection/detectors/hrcgpartition/minblocks = 2 # Factor on how heavy equality (beta) and inequality constraints are measured # [type: real, advanced: FALSE, range: [0,1], default: 0.5] detection/detectors/hrcgpartition/beta = 0.5 # Factor on how heavy the standard deviation of the coefficients is measured # [type: real, advanced: FALSE, range: [0,1e+20], default: 0] detection/detectors/hrcgpartition/alpha = 0 # Weight of a variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 2] detection/detectors/hrcgpartition/varWeight = 2 # Weight of a binary variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 3] 
detection/detectors/hrcgpartition/varWeightBinary = 3 # Weight of a continuous variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 2] detection/detectors/hrcgpartition/varWeightContinous = 2 # Weight of an implicit integer variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 3] detection/detectors/hrcgpartition/varWeightImplint = 3 # Weight of an integer variable hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 3] detection/detectors/hrcgpartition/varWeightInteger = 3 # Weight of a constraint hyperedge # [type: int, advanced: FALSE, range: [0,1000000], default: 1] detection/detectors/hrcgpartition/consWeight = 1 # Whether to clean up temporary files # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/hrcgpartition/tidy = TRUE # Random seed for hmetis # [type: int, advanced: FALSE, range: [-1,2147483647], default: 1] detection/detectors/hrcgpartition/randomseed = 1 # Percentage of dummy nodes for metis # [type: real, advanced: FALSE, range: [0,1], default: 0.2] detection/detectors/hrcgpartition/dummynodes = 0.2 # Weight for constraint hyperedges that are setpartitioning or covering constraints # [type: int, advanced: FALSE, range: [0,1000000], default: 5] detection/detectors/hrcgpartition/consWeightSetppc = 5 # Unbalance factor for metis # [type: real, advanced: FALSE, range: [0,1e+20], default: 5] detection/detectors/hrcgpartition/ubfactor = 5 # Should the metis output be displayed # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/hrcgpartition/metisverbose = FALSE # Should the rb or kway method be used for partitioning by metis # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/hrcgpartition/metisuseptyperb = TRUE # Should the problem be used for metis files or a temporary name # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] 
detection/detectors/hrcgpartition/realname = FALSE # flag to indicate whether detector <connectedbase> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/connectedbase/enabled = FALSE # flag to indicate whether detector <connectedbase> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/connectedbase/finishingenabled = TRUE # flag to indicate whether detector <connectedbase> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/connectedbase/postprocessingenabled = FALSE # flag to indicate whether detector <connectedbase> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/connectedbase/skip = FALSE # flag to indicate whether detector <connectedbase> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/connectedbase/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <connectedbase> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/connectedbase/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <connectedbase> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/connectedbase/freqcallround = 1 # maximum round the detector gets called in detection loop <connectedbase> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/connectedbase/maxcallround = 2147483647 # minimum round the detector gets called in detection loop <connectedbase> # 
[type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/connectedbase/mincallround = 0 # frequency the detector gets called in detection loop, i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <connectedbase> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/connectedbase/origfreqcallround = 1 # maximum round the detector gets called in detection loop <connectedbase> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/connectedbase/origmaxcallround = 2147483647 # minimum round the detector gets called in detection loop <connectedbase> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/connectedbase/origmincallround = 0 # priority of detector <connectedbase> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/connectedbase/priority = 0 # should the constraint adjacency be used # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/connectedbase/useconssadj = TRUE # flag to indicate whether detector <connected_nonewlinkingvars> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/connected_nonewlinkingvars/enabled = FALSE # flag to indicate whether detector <connected_nonewlinkingvars> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/connected_nonewlinkingvars/finishingenabled = FALSE # flag to indicate whether detector <connected_nonewlinkingvars> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/connected_nonewlinkingvars/postprocessingenabled = FALSE # flag to indicate whether detector <connected_nonewlinkingvars> should be skipped if others 
found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/connected_nonewlinkingvars/skip = FALSE # flag to indicate whether detector <connected_nonewlinkingvars> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/connected_nonewlinkingvars/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <connected_nonewlinkingvars> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/connected_nonewlinkingvars/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <connected_nonewlinkingvars> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/connected_nonewlinkingvars/freqcallround = 1 # maximum round the detector gets called in detection loop <connected_nonewlinkingvars> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/connected_nonewlinkingvars/maxcallround = 2147483647 # minimum round the detector gets called in detection loop <connected_nonewlinkingvars> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/connected_nonewlinkingvars/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <connected_nonewlinkingvars> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/connected_nonewlinkingvars/origfreqcallround = 1 # maximum round the detector gets called in detection loop <connected_nonewlinkingvars> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] 
detection/detectors/connected_nonewlinkingvars/origmaxcallround = 2147483647 # minimum round the detector gets called in detection loop <connected_nonewlinkingvars> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/connected_nonewlinkingvars/origmincallround = 0 # priority of detector <connected_nonewlinkingvars> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/connected_nonewlinkingvars/priority = 0 # flag to indicate whether detector <generalmastersetpack> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/generalmastersetpack/enabled = TRUE # flag to indicate whether detector <generalmastersetpack> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetpack/finishingenabled = FALSE # flag to indicate whether detector <generalmastersetpack> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetpack/postprocessingenabled = FALSE # flag to indicate whether detector <generalmastersetpack> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetpack/skip = FALSE # flag to indicate whether detector <generalmastersetpack> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetpack/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <generalmastersetpack> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetpack/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in 
round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <generalmastersetpack> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/generalmastersetpack/freqcallround = 1 # maximum round the detector gets called in detection loop <generalmastersetpack> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/generalmastersetpack/maxcallround = 0 # minimum round the detector gets called in detection loop <generalmastersetpack> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/generalmastersetpack/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <generalmastersetpack> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/generalmastersetpack/origfreqcallround = 1 # maximum round the detector gets called in detection loop <generalmastersetpack> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/generalmastersetpack/origmaxcallround = 0 # minimum round the detector gets called in detection loop <generalmastersetpack> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/generalmastersetpack/origmincallround = 0 # priority of detector <generalmastersetpack> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/generalmastersetpack/priority = 0 # flag to indicate whether detector <generalmastersetpart> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/generalmastersetpart/enabled = TRUE # flag to indicate whether detector <generalmastersetpart> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] 
detection/detectors/generalmastersetpart/finishingenabled = FALSE # flag to indicate whether detector <generalmastersetpart> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetpart/postprocessingenabled = FALSE # flag to indicate whether detector <generalmastersetpart> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetpart/skip = FALSE # flag to indicate whether detector <generalmastersetpart> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetpart/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <generalmastersetpart> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetpart/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <generalmastersetpart> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/generalmastersetpart/freqcallround = 1 # maximum round the detector gets called in detection loop <generalmastersetpart> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/generalmastersetpart/maxcallround = 0 # minimum round the detector gets called in detection loop <generalmastersetpart> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/generalmastersetpart/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <generalmastersetpart> # 
[type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/generalmastersetpart/origfreqcallround = 1 # maximum round the detector gets called in detection loop <generalmastersetpart> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/generalmastersetpart/origmaxcallround = 0 # minimum round the detector gets called in detection loop <generalmastersetpart> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/generalmastersetpart/origmincallround = 0 # priority of detector <generalmastersetpart> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/generalmastersetpart/priority = 0 # flag to indicate whether detector <generalmastersetcover> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/generalmastersetcover/enabled = TRUE # flag to indicate whether detector <generalmastersetcover> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetcover/finishingenabled = FALSE # flag to indicate whether detector <generalmastersetcover> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetcover/postprocessingenabled = FALSE # flag to indicate whether detector <generalmastersetcover> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetcover/skip = FALSE # flag to indicate whether detector <generalmastersetcover> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetcover/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <generalmastersetcover> should 
be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/generalmastersetcover/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <generalmastersetcover> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/generalmastersetcover/freqcallround = 1 # maximum round the detector gets called in detection loop <generalmastersetcover> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/generalmastersetcover/maxcallround = 0 # minimum round the detector gets called in detection loop <generalmastersetcover> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/generalmastersetcover/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <generalmastersetcover> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/generalmastersetcover/origfreqcallround = 1 # maximum round the detector gets called in detection loop <generalmastersetcover> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/generalmastersetcover/origmaxcallround = 0 # minimum round the detector gets called in detection loop <generalmastersetcover> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/generalmastersetcover/origmincallround = 0 # priority of detector <generalmastersetcover> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/generalmastersetcover/priority = 0 # flag to indicate whether detector <varclass> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] 
detection/detectors/varclass/enabled = TRUE # flag to indicate whether detector <varclass> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/varclass/finishingenabled = FALSE # flag to indicate whether detector <varclass> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/varclass/postprocessingenabled = FALSE # flag to indicate whether detector <varclass> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/varclass/skip = FALSE # flag to indicate whether detector <varclass> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/varclass/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <varclass> should be overruled by normal settings # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/varclass/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <varclass> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/varclass/freqcallround = 1 # maximum round the detector gets called in detection loop <varclass> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/varclass/maxcallround = 0 # minimum round the detector gets called in detection loop <varclass> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/varclass/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod 
freqCallRound == 0 <varclass> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/varclass/origfreqcallround = 1 # maximum round the detector gets called in detection loop <varclass> # [type: int, advanced: FALSE, range: [0,2147483647], default: 2147483647] detection/detectors/varclass/origmaxcallround = 2147483647 # minimum round the detector gets called in detection loop <varclass> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/varclass/origmincallround = 0 # priority of detector <varclass> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 0] detection/detectors/varclass/priority = 0 # maximum number of classes # [type: int, advanced: FALSE, range: [1,2147483647], default: 8] detection/detectors/varclass/maxnclasses = 8 # flag to indicate whether detector <isomorph> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/isomorph/enabled = FALSE # flag to indicate whether detector <isomorph> is enabled for finishing of incomplete decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/isomorph/finishingenabled = FALSE # flag to indicate whether detector <isomorph> is enabled for postprocessing of finished decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/isomorph/postprocessingenabled = FALSE # flag to indicate whether detector <isomorph> should be skipped if others found decompositions # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/detectors/isomorph/skip = TRUE # flag to indicate whether detector <isomorph> should be called on descendants of the current partialdec # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/isomorph/usefullrecall = FALSE # flag to indicate whether emphasis settings for detector <isomorph> should be overruled by normal settings # 
[type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/detectors/isomorph/overruleemphasis = FALSE # frequency the detector gets called in detection loop ,ie it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <isomorph> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/isomorph/freqcallround = 1 # maximum round the detector gets called in detection loop <isomorph> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/isomorph/maxcallround = 0 # minimum round the detector gets called in detection loop <isomorph> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/isomorph/mincallround = 0 # frequency the detector gets called in detection loop,i.e., it is called in round r if and only if minCallRound <= r <= maxCallRound AND (r - minCallRound) mod freqCallRound == 0 <isomorph> # [type: int, advanced: FALSE, range: [0,2147483647], default: 1] detection/detectors/isomorph/origfreqcallround = 1 # maximum round the detector gets called in detection loop <isomorph> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/isomorph/origmaxcallround = 0 # minimum round the detector gets called in detection loop <isomorph> # [type: int, advanced: FALSE, range: [0,2147483647], default: 0] detection/detectors/isomorph/origmincallround = 0 # priority of detector <isomorph> # [type: int, advanced: FALSE, range: [-2147483648,2147483647], default: 100] detection/detectors/isomorph/priority = 100 # Maximum number of solutions/decompositions with exact detection # [type: int, advanced: FALSE, range: [0,2147483647], default: 6] detection/detectors/isomorph/maxdecompsexact = 6 # Maximum number of solutions/decompositions with extended detection # [type: int, advanced: FALSE, range: [0,2147483647], default: 4] detection/detectors/isomorph/maxdecompsextend = 4 # flag to 
indicate whether constraint classifier for <nonzero entries> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/classification/consclassifier/nnonzeros/enabled = TRUE # flag to indicate whether constraint classifier for <scip constypes> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/classification/consclassifier/scipconstype/enabled = TRUE # flag to indicate whether constraint classifier for <miplib constypes> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/classification/consclassifier/miplibconstype/enabled = TRUE # flag to indicate whether constraint classifier for <constraint names (according to levenshtein distance graph)> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/classification/consclassifier/consnamelevenshtein/enabled = FALSE # flag to indicate whether constraint classifier for <constraint names (remove digits; check for identity)> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] detection/classification/consclassifier/consnamenonumbers/enabled = FALSE # flag to indicate whether constraint classifier for <domain in GAMS file> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/classification/consclassifier/gamsdomain/enabled = TRUE # flag to indicate whether constraint classifier for <symbol in GAMS file> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/classification/consclassifier/gamssymbol/enabled = TRUE # flag to indicate whether variable classifier for <domain in gams file> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/classification/varclassifier/gamsdomain/enabled = TRUE # flag to indicate whether variable classifier for <symbol in gams file> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] 
detection/classification/varclassifier/gamssymbol/enabled = TRUE # flag to indicate whether variable classifier for <scipvartypes> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/classification/varclassifier/scipvartype/enabled = TRUE # flag to indicate whether variable classifier for <objective function values> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/classification/varclassifier/objectivevalues/enabled = TRUE # flag to indicate whether variable classifier for <objective function value signs> is enabled # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] detection/classification/varclassifier/objectivevaluesigns/enabled = TRUE # priority of heuristic <gcgcoefdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1001000] heuristics/gcgcoefdiving/priority = -1001000 # frequency for calling primal heuristic <gcgcoefdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/gcgcoefdiving/freq = 10 # frequency offset for calling primal heuristic <gcgcoefdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 1] heuristics/gcgcoefdiving/freqofs = 1 # maximal depth level to call primal heuristic <gcgcoefdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcgcoefdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/gcgcoefdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/gcgcoefdiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/gcgcoefdiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, 
range: [0,2147483647], default: 1000] heuristics/gcgcoefdiving/maxlpiterofs = 1000 # maximal number of allowed pricing rounds (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] heuristics/gcgcoefdiving/maxpricerounds = 0 # perform pricing only if infeasibility is encountered # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgcoefdiving/usefarkasonly = FALSE # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/gcgcoefdiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/gcgcoefdiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/gcgcoefdiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/gcgcoefdiving/maxdiveavgquotnosol = 0 # try to branch the diving variable in the other direction in case of infeasibility # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/gcgcoefdiving/otherdirection = TRUE # single backtracking by choosing another variable in case of infeasibility # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgcoefdiving/backtrack = FALSE # maximal depth until which a limited discrepancy search is performed # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] heuristics/gcgcoefdiving/maxdiscdepth = 0 # maximal discrepancy allowed in backtracking and limited discrepancy search # [type: int, advanced: TRUE, range: [0,2147483647], default: 2] heuristics/gcgcoefdiving/maxdiscrepancy = 2 # 
calculate the number of locks w.r.t. the master LP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgcoefdiving/usemasterlocks = FALSE # priority of heuristic <gcgfracdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1003000] heuristics/gcgfracdiving/priority = -1003000 # frequency for calling primal heuristic <gcgfracdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/gcgfracdiving/freq = 10 # frequency offset for calling primal heuristic <gcgfracdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 3] heuristics/gcgfracdiving/freqofs = 3 # maximal depth level to call primal heuristic <gcgfracdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcgfracdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/gcgfracdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/gcgfracdiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/gcgfracdiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/gcgfracdiving/maxlpiterofs = 1000 # maximal number of allowed pricing rounds (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] heuristics/gcgfracdiving/maxpricerounds = 0 # perform pricing only if infeasibility is encountered # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgfracdiving/usefarkasonly = FALSE # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: 
[0,1], default: 0.8] heuristics/gcgfracdiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/gcgfracdiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/gcgfracdiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/gcgfracdiving/maxdiveavgquotnosol = 0 # try to branch the diving variable in the other direction in case of infeasibility # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/gcgfracdiving/otherdirection = TRUE # single backtracking by choosing another variable in case of infeasibility # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgfracdiving/backtrack = FALSE # maximal depth until which a limited discrepancy search is performed # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] heuristics/gcgfracdiving/maxdiscdepth = 0 # maximal discrepancy allowed in backtracking and limited discrepancy search # [type: int, advanced: TRUE, range: [0,2147483647], default: 2] heuristics/gcgfracdiving/maxdiscrepancy = 2 # calculate the fractionalities w.r.t. the master LP? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgfracdiving/usemasterfracs = FALSE # priority of heuristic <gcgguideddiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1007000] heuristics/gcgguideddiving/priority = -1007000 # frequency for calling primal heuristic <gcgguideddiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/gcgguideddiving/freq = 10 # frequency offset for calling primal heuristic <gcgguideddiving> # [type: int, advanced: FALSE, range: [0,65534], default: 7] heuristics/gcgguideddiving/freqofs = 7 # maximal depth level to call primal heuristic <gcgguideddiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcgguideddiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/gcgguideddiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/gcgguideddiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/gcgguideddiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/gcgguideddiving/maxlpiterofs = 1000 # maximal number of allowed pricing rounds (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] heuristics/gcgguideddiving/maxpricerounds = 0 # perform pricing only if infeasibility is encountered # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgguideddiving/usefarkasonly = FALSE # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] 
heuristics/gcgguideddiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/gcgguideddiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/gcgguideddiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/gcgguideddiving/maxdiveavgquotnosol = 0 # try to branch the diving variable in the other direction in case of infeasibility # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/gcgguideddiving/otherdirection = TRUE # single backtracking by choosing another variable in case of infeasibility # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgguideddiving/backtrack = FALSE # maximal depth until which a limited discrepancy search is performed # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] heuristics/gcgguideddiving/maxdiscdepth = 0 # maximal discrepancy allowed in backtracking and limited discrepancy search # [type: int, advanced: TRUE, range: [0,2147483647], default: 2] heuristics/gcgguideddiving/maxdiscrepancy = 2 # calculate the fractionalities w.r.t. the master LP? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgguideddiving/usemasterfracs = FALSE # priority of heuristic <gcglinesdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1006000] heuristics/gcglinesdiving/priority = -1006000 # frequency for calling primal heuristic <gcglinesdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/gcglinesdiving/freq = 10 # frequency offset for calling primal heuristic <gcglinesdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 6] heuristics/gcglinesdiving/freqofs = 6 # maximal depth level to call primal heuristic <gcglinesdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcglinesdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/gcglinesdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/gcglinesdiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/gcglinesdiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/gcglinesdiving/maxlpiterofs = 1000 # maximal number of allowed pricing rounds (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] heuristics/gcglinesdiving/maxpricerounds = 0 # perform pricing only if infeasibility is encountered # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcglinesdiving/usefarkasonly = FALSE # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] 
heuristics/gcglinesdiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/gcglinesdiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/gcglinesdiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/gcglinesdiving/maxdiveavgquotnosol = 0 # try to branch the diving variable in the other direction in case of infeasibility # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/gcglinesdiving/otherdirection = TRUE # single backtracking by choosing another variable in case of infeasibility # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcglinesdiving/backtrack = FALSE # maximal depth until which a limited discrepancy search is performed # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] heuristics/gcglinesdiving/maxdiscdepth = 0 # maximal discrepancy allowed in backtracking and limited discrepancy search # [type: int, advanced: TRUE, range: [0,2147483647], default: 2] heuristics/gcglinesdiving/maxdiscrepancy = 2 # priority of heuristic <gcgpscostdiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1002000] heuristics/gcgpscostdiving/priority = -1002000 # frequency for calling primal heuristic <gcgpscostdiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/gcgpscostdiving/freq = 10 # frequency offset for calling primal heuristic <gcgpscostdiving> # [type: int, advanced: FALSE, range: [0,65534], default: 2] heuristics/gcgpscostdiving/freqofs = 2 # maximal depth level to call primal heuristic 
<gcgpscostdiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcgpscostdiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/gcgpscostdiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/gcgpscostdiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/gcgpscostdiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/gcgpscostdiving/maxlpiterofs = 1000 # maximal number of allowed pricing rounds (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] heuristics/gcgpscostdiving/maxpricerounds = 0 # perform pricing only if infeasibility is encountered # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgpscostdiving/usefarkasonly = FALSE # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/gcgpscostdiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/gcgpscostdiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/gcgpscostdiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/gcgpscostdiving/maxdiveavgquotnosol = 0 # try to branch the diving variable in 
the other direction in case of infeasibility # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/gcgpscostdiving/otherdirection = TRUE # single backtracking by choosing another variable in case of infeasibility # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgpscostdiving/backtrack = FALSE # maximal depth until which a limited discrepancy search is performed # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] heuristics/gcgpscostdiving/maxdiscdepth = 0 # maximal discrepancy allowed in backtracking and limited discrepancy search # [type: int, advanced: TRUE, range: [0,2147483647], default: 2] heuristics/gcgpscostdiving/maxdiscrepancy = 2 # shall pseudocosts be calculated w.r.t. the master problem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgpscostdiving/usemasterpscosts = FALSE # priority of heuristic <gcgveclendiving> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1003100] heuristics/gcgveclendiving/priority = -1003100 # frequency for calling primal heuristic <gcgveclendiving> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/gcgveclendiving/freq = 10 # frequency offset for calling primal heuristic <gcgveclendiving> # [type: int, advanced: FALSE, range: [0,65534], default: 4] heuristics/gcgveclendiving/freqofs = 4 # maximal depth level to call primal heuristic <gcgveclendiving> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcgveclendiving/maxdepth = -1 # minimal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 0] heuristics/gcgveclendiving/minreldepth = 0 # maximal relative depth to start diving # [type: real, advanced: TRUE, range: [0,1], default: 1] heuristics/gcgveclendiving/maxreldepth = 1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: 
FALSE, range: [0,1.79769313486232e+308], default: 0.05] heuristics/gcgveclendiving/maxlpiterquot = 0.05 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/gcgveclendiving/maxlpiterofs = 1000 # maximal number of allowed pricing rounds (-1: no limit) # [type: int, advanced: FALSE, range: [-1,2147483647], default: 0] heuristics/gcgveclendiving/maxpricerounds = 0 # perform pricing only if infeasibility is encountered # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgveclendiving/usefarkasonly = FALSE # maximal quotient (curlowerbound - lowerbound)/(cutoffbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.8] heuristics/gcgveclendiving/maxdiveubquot = 0.8 # maximal quotient (curlowerbound - lowerbound)/(avglowerbound - lowerbound) where diving is performed (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/gcgveclendiving/maxdiveavgquot = 0 # maximal UBQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1], default: 0.1] heuristics/gcgveclendiving/maxdiveubquotnosol = 0.1 # maximal AVGQUOT when no solution was found yet (0.0: no limit) # [type: real, advanced: TRUE, range: [0,1.79769313486232e+308], default: 0] heuristics/gcgveclendiving/maxdiveavgquotnosol = 0 # try to branch the diving variable in the other direction in case of infeasibility # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/gcgveclendiving/otherdirection = TRUE # single backtracking by choosing another variable in case of infeasibility # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgveclendiving/backtrack = FALSE # maximal depth until which a limited discrepancy search is performed # [type: int, advanced: TRUE, range: [0,2147483647], default: 0] 
heuristics/gcgveclendiving/maxdiscdepth = 0 # maximal discrepancy allowed in backtracking and limited discrepancy search # [type: int, advanced: TRUE, range: [0,2147483647], default: 2] heuristics/gcgveclendiving/maxdiscrepancy = 2 # calculate vector length scores w.r.t. the master LP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgveclendiving/usemasterscores = FALSE # priority of heuristic <gcgdins> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1105000] heuristics/gcgdins/priority = -1105000 # frequency for calling primal heuristic <gcgdins> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/gcgdins/freq = -1 # frequency offset for calling primal heuristic <gcgdins> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/gcgdins/freqofs = 0 # maximal depth level to call primal heuristic <gcgdins> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcgdins/maxdepth = -1 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 5000] heuristics/gcgdins/nodesofs = 5000 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.05] heuristics/gcgdins/nodesquot = 0.05 # minimum number of nodes required to start the subproblem # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 500] heuristics/gcgdins/minnodes = 500 # number of pool-solutions to be checked for flag array update (for hard fixing of binary variables) # [type: int, advanced: FALSE, range: [1,2147483647], default: 5] heuristics/gcgdins/solnum = 5 # radius (using Manhattan metric) of the incumbent's neighborhood to be searched # [type: int, advanced: FALSE, range: [1,2147483647], default: 18] heuristics/gcgdins/neighborhoodsize = 18 # maximum 
number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 5000] heuristics/gcgdins/maxnodes = 5000 # factor by which gcgdins should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/gcgdins/minimprove = 0.01 # number of nodes without incumbent change that heuristic should wait # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 0] heuristics/gcgdins/nwaitingnodes = 0 # should subproblem be created out of the rows in the LP rows? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgdins/uselprows = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/gcgdins/copycuts = TRUE # priority of heuristic <gcgfeaspump> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1000000] heuristics/gcgfeaspump/priority = -1000000 # frequency for calling primal heuristic <gcgfeaspump> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/gcgfeaspump/freq = -1 # frequency offset for calling primal heuristic <gcgfeaspump> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/gcgfeaspump/freqofs = 0 # maximal depth level to call primal heuristic <gcgfeaspump> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcgfeaspump/maxdepth = -1 # maximal fraction of diving LP iterations compared to node LP iterations # [type: real, advanced: FALSE, range: [0,1.79769313486232e+308], default: 0.01] heuristics/gcgfeaspump/maxlpiterquot = 0.01 # factor by which the regard of the objective is decreased in each round, 1.0 for dynamic # [type: real, advanced: FALSE, range: [0,1], default: 1] heuristics/gcgfeaspump/objfactor = 1 # threshold difference for the convex parameter to 
perform perturbation # [type: real, advanced: FALSE, range: [0,1], default: 1] heuristics/gcgfeaspump/alphadiff = 1 # additional number of allowed LP iterations # [type: int, advanced: FALSE, range: [0,2147483647], default: 1000] heuristics/gcgfeaspump/maxlpiterofs = 1000 # total number of feasible solutions found up to which heuristic is called (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10] heuristics/gcgfeaspump/maxsols = 10 # maximal number of pumping loops (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10000] heuristics/gcgfeaspump/maxloops = 10000 # maximal number of pumping rounds without fractionality improvement (-1: no limit) # [type: int, advanced: TRUE, range: [-1,2147483647], default: 10] heuristics/gcgfeaspump/maxstallloops = 10 # minimum number of random variables to flip, if a 1-cycle is encountered # [type: int, advanced: TRUE, range: [1,2147483647], default: 10] heuristics/gcgfeaspump/minflips = 10 # maximum length of cycles to be checked explicitly in each round # [type: int, advanced: TRUE, range: [1,100], default: 3] heuristics/gcgfeaspump/cyclelength = 3 # number of iterations until a random perturbation is forced # [type: int, advanced: TRUE, range: [1,2147483647], default: 100] heuristics/gcgfeaspump/perturbfreq = 100 # radius (using Manhattan metric) of the neighborhood to be searched in stage 3 # [type: int, advanced: FALSE, range: [1,2147483647], default: 18] heuristics/gcgfeaspump/neighborhoodsize = 18 # should an iterative round-and-propagate scheme be used to find the integral points? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgfeaspump/usefp20 = FALSE # should a random perturbation be performed if a feasible solution was found? # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] heuristics/gcgfeaspump/pertsolfound = TRUE # should we solve a local branching sub-MIP if no solution could be found? 
# [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgfeaspump/stage3 = FALSE # should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/gcgfeaspump/copycuts = TRUE # priority of heuristic <gcgrens> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1100000] heuristics/gcgrens/priority = -1100000 # frequency for calling primal heuristic <gcgrens> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/gcgrens/freq = 0 # frequency offset for calling primal heuristic <gcgrens> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/gcgrens/freqofs = 0 # maximal depth level to call primal heuristic <gcgrens> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcgrens/maxdepth = -1 # minimum percentage of integer variables that have to be fixable # [type: real, advanced: FALSE, range: [0,1], default: 0.5] heuristics/gcgrens/minfixingrate = 0.5 # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 5000] heuristics/gcgrens/maxnodes = 5000 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 500] heuristics/gcgrens/nodesofs = 500 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 500] heuristics/gcgrens/minnodes = 500 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/gcgrens/nodesquot = 0.1 # factor by which RENS should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/gcgrens/minimprove = 0.01 # should general integers get 
binary bounds [floor(.),ceil(.)] ? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/gcgrens/binarybounds = TRUE # should subproblem be created out of the rows in the LP rows? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgrens/uselprows = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/gcgrens/copycuts = TRUE # should all subproblem solutions be added to the original SCIP? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgrens/addallsols = FALSE # priority of heuristic <gcgrins> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1101000] heuristics/gcgrins/priority = -1101000 # frequency for calling primal heuristic <gcgrins> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 20] heuristics/gcgrins/freq = 20 # frequency offset for calling primal heuristic <gcgrins> # [type: int, advanced: FALSE, range: [0,65534], default: 5] heuristics/gcgrins/freqofs = 5 # maximal depth level to call primal heuristic <gcgrins> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcgrins/maxdepth = -1 # number of nodes added to the contingent of the total nodes # [type: int, advanced: FALSE, range: [0,2147483647], default: 500] heuristics/gcgrins/nodesofs = 500 # maximum number of nodes to regard in the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 5000] heuristics/gcgrins/maxnodes = 5000 # minimum number of nodes required to start the subproblem # [type: int, advanced: TRUE, range: [0,2147483647], default: 500] heuristics/gcgrins/minnodes = 500 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/gcgrins/nodesquot = 
0.1 # number of nodes without incumbent change that heuristic should wait # [type: int, advanced: TRUE, range: [0,2147483647], default: 200] heuristics/gcgrins/nwaitingnodes = 200 # factor by which gcgrins should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/gcgrins/minimprove = 0.01 # minimum percentage of integer variables that have to be fixed # [type: real, advanced: FALSE, range: [0,1], default: 0] heuristics/gcgrins/minfixingrate = 0 # should subproblem be created out of the rows in the LP rows? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/gcgrins/uselprows = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/gcgrins/copycuts = TRUE # priority of heuristic <gcgrounding> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1000] heuristics/gcgrounding/priority = -1000 # frequency for calling primal heuristic <gcgrounding> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] heuristics/gcgrounding/freq = 1 # frequency offset for calling primal heuristic <gcgrounding> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/gcgrounding/freqofs = 0 # maximal depth level to call primal heuristic <gcgrounding> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcgrounding/maxdepth = -1 # number of calls per found solution that are considered as standard success, a higher factor causes the heuristic to be called more often # [type: int, advanced: TRUE, range: [-1,2147483647], default: 100] heuristics/gcgrounding/successfactor = 100 # priority of heuristic <gcgshifting> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -5000] heuristics/gcgshifting/priority = -5000 # frequency for calling primal heuristic 
<gcgshifting> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 10] heuristics/gcgshifting/freq = 10 # frequency offset for calling primal heuristic <gcgshifting> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/gcgshifting/freqofs = 0 # maximal depth level to call primal heuristic <gcgshifting> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcgshifting/maxdepth = -1 # priority of heuristic <gcgsimplerounding> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: 0] heuristics/gcgsimplerounding/priority = 0 # frequency for calling primal heuristic <gcgsimplerounding> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 1] heuristics/gcgsimplerounding/freq = 1 # frequency offset for calling primal heuristic <gcgsimplerounding> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/gcgsimplerounding/freqofs = 0 # maximal depth level to call primal heuristic <gcgsimplerounding> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcgsimplerounding/maxdepth = -1 # priority of heuristic <gcgzirounding> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -500] heuristics/gcgzirounding/priority = -500 # frequency for calling primal heuristic <gcgzirounding> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: -1] heuristics/gcgzirounding/freq = -1 # frequency offset for calling primal heuristic <gcgzirounding> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/gcgzirounding/freqofs = 0 # maximal depth level to call primal heuristic <gcgzirounding> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/gcgzirounding/maxdepth = -1 # determines maximum number of rounding loops # [type: int, advanced: TRUE, range: [0,2147483647], default: 2] 
heuristics/gcgzirounding/maxroundingloops = 2 # flag to determine if Zirounding is deactivated after a certain percentage of unsuccessful calls # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/gcgzirounding/stopziround = TRUE # if percentage of found solutions falls below this parameter, Zirounding will be deactivated # [type: real, advanced: TRUE, range: [0,1], default: 0.02] heuristics/gcgzirounding/stoppercentage = 0.02 # determines the minimum number of calls before percentage-based deactivation of Zirounding is applied # [type: int, advanced: TRUE, range: [1,2147483647], default: 1000] heuristics/gcgzirounding/minstopncalls = 1000 # priority of heuristic <xpcrossover> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1100500] heuristics/xpcrossover/priority = -1100500 # frequency for calling primal heuristic <xpcrossover> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/xpcrossover/freq = 0 # frequency offset for calling primal heuristic <xpcrossover> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/xpcrossover/freqofs = 0 # maximal depth level to call primal heuristic <xpcrossover> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/xpcrossover/maxdepth = -1 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 200] heuristics/xpcrossover/nodesofs = 200 # maximum number of nodes to regard in the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 1000] heuristics/xpcrossover/maxnodes = 1000 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 200] heuristics/xpcrossover/minnodes = 200 # number of extreme pts per block that will be taken into account # [type: int, advanced: FALSE, range: 
[2,2147483647], default: 4] heuristics/xpcrossover/nusedpts = 4 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/xpcrossover/nodesquot = 0.1 # minimum percentage of integer variables that have to be fixed # [type: real, advanced: FALSE, range: [0,1], default: 0.4] heuristics/xpcrossover/minfixingrate = 0.4 # factor by which crossover should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/xpcrossover/minimprove = 0.01 # should the choice which sols to take be randomized? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/xpcrossover/randomization = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/xpcrossover/copycuts = TRUE # priority of heuristic <xprins> # [type: int, advanced: TRUE, range: [-536870912,536870911], default: -1100600] heuristics/xprins/priority = -1100600 # frequency for calling primal heuristic <xprins> (-1: never, 0: only at depth freqofs) # [type: int, advanced: FALSE, range: [-1,65534], default: 0] heuristics/xprins/freq = 0 # frequency offset for calling primal heuristic <xprins> # [type: int, advanced: FALSE, range: [0,65534], default: 0] heuristics/xprins/freqofs = 0 # maximal depth level to call primal heuristic <xprins> (-1: no limit) # [type: int, advanced: TRUE, range: [-1,65534], default: -1] heuristics/xprins/maxdepth = -1 # minimum percentage of coincidence of relaxation and extreme pts # [type: real, advanced: FALSE, range: [0,1], default: 0.5] heuristics/xprins/equalityrate = 0.5 # number of nodes added to the contingent of the total nodes # [type: longint, advanced: FALSE, range: [0,9223372036854775807], default: 200] heuristics/xprins/nodesofs = 200 # maximum number of nodes to regard in the subproblem # 
[type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 1000] heuristics/xprins/maxnodes = 1000 # minimum number of nodes required to start the subproblem # [type: longint, advanced: TRUE, range: [0,9223372036854775807], default: 200] heuristics/xprins/minnodes = 200 # number of extreme pts per block that will be taken into account (-1: all; 0: all which contribute to current relaxation solution) # [type: int, advanced: FALSE, range: [-1,2147483647], default: -1] heuristics/xprins/nusedpts = -1 # contingent of sub problem nodes in relation to the number of nodes of the original problem # [type: real, advanced: FALSE, range: [0,1], default: 0.1] heuristics/xprins/nodesquot = 0.1 # minimum percentage of integer variables that have to be fixed # [type: real, advanced: FALSE, range: [0,1], default: 0.4] heuristics/xprins/minfixingrate = 0.4 # factor by which crossover should at least improve the incumbent # [type: real, advanced: TRUE, range: [0,1], default: 0.01] heuristics/xprins/minimprove = 0.01 # should the choice which sols to take be randomized? # [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: FALSE] heuristics/xprins/randomization = FALSE # if uselprows == FALSE, should all active cuts from cutpool be copied to constraints in subproblem? 
# [type: bool, advanced: TRUE, range: {TRUE,FALSE}, default: TRUE] heuristics/xprins/copycuts = TRUE # display activation status of display column <solfound> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/solfound/active = 1 # display activation status of display column <time> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/time/active = 1 # display activation status of display column <nnodes> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/nnodes/active = 1 # display activation status of display column <nodesleft> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/nodesleft/active = 1 # display activation status of display column <lpiterations> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/lpiterations/active = 1 # display activation status of display column <lpavgiterations> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/lpavgiterations/active = 1 # display activation status of display column <lpcond> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/lpcond/active = 1 # display activation status of display column <memused> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/memused/active = 1 # display activation status of display column <depth> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/depth/active = 1 # display activation status of display column <maxdepth> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/maxdepth/active = 1 # display activation status of display column <plungedepth> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/plungedepth/active = 1 # display activation status of display column <nfrac> (0: off, 1: 
auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/nfrac/active = 1 # display activation status of display column <nexternbranchcands> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/nexternbranchcands/active = 1 # display activation status of display column <vars> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/vars/active = 1 # display activation status of display column <conss> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/conss/active = 1 # display activation status of display column <curconss> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/curconss/active = 1 # display activation status of display column <curcols> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/curcols/active = 1 # display activation status of display column <currows> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/currows/active = 1 # display activation status of display column <cuts> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/cuts/active = 1 # display activation status of display column <separounds> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/separounds/active = 1 # display activation status of display column <poolsize> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/poolsize/active = 1 # display activation status of display column <conflicts> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/conflicts/active = 1 # display activation status of display column <strongbranchs> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/strongbranchs/active = 1 # display activation status of display column <pseudoobj> (0: off, 1: 
auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/pseudoobj/active = 1 # display activation status of display column <lpobj> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/lpobj/active = 1 # display activation status of display column <curdualbound> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/curdualbound/active = 1 # display activation status of display column <estimate> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/estimate/active = 1 # display activation status of display column <avgdualbound> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/avgdualbound/active = 1 # display activation status of display column <dualbound> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/dualbound/active = 1 # display activation status of display column <primalbound> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/primalbound/active = 1 # display activation status of display column <cutoffbound> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/cutoffbound/active = 1 # display activation status of display column <gap> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/gap/active = 1 # display activation status of display column <primalgap> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 0] display/primalgap/active = 0 # display activation status of display column <nsols> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/nsols/active = 1 # display activation status of display column <mlpiterations> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/mlpiterations/active = 1 # display activation status of display column <mvars> 
(0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/mvars/active = 1 # display activation status of display column <mconss> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/mconss/active = 1 # display activation status of display column <mcuts> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/mcuts/active = 1 # display activation status of display column <degeneracy> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/degeneracy/active = 1 # display activation status of display column <sumlpiterations> (0: off, 1: auto, 2:on) # [type: int, advanced: FALSE, range: [0,2], default: 1] display/sumlpiterations/active = 1 # is statistics table <status> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/status/active = TRUE # is statistics table <timing> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/timing/active = TRUE # is statistics table <origprob> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/origprob/active = TRUE # is statistics table <presolvedprob> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/presolvedprob/active = TRUE # is statistics table <presolver> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/presolver/active = TRUE # is statistics table <constraint> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/constraint/active = TRUE # is statistics table <constiming> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/constiming/active = TRUE # is statistics table <propagator> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/propagator/active = TRUE # is statistics table <conflict> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] 
table/conflict/active = TRUE # is statistics table <separator> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/separator/active = TRUE # is statistics table <cutsel> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/cutsel/active = TRUE # is statistics table <pricer> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/pricer/active = TRUE # is statistics table <branchrules> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/branchrules/active = TRUE # is statistics table <heuristics> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/heuristics/active = TRUE # is statistics table <compression> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/compression/active = TRUE # is statistics table <benders> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/benders/active = TRUE # is statistics table <exprhdlr> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/exprhdlr/active = TRUE # is statistics table <lp> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/lp/active = TRUE # is statistics table <nlp> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/nlp/active = TRUE # is statistics table <nlpi> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/nlpi/active = TRUE # is statistics table <relaxator> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/relaxator/active = TRUE # is statistics table <tree> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/tree/active = TRUE # is statistics table <root> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/root/active = TRUE # is statistics table <solution> active # [type: bool, advanced: FALSE, range: 
{TRUE,FALSE}, default: TRUE] table/solution/active = TRUE # is statistics table <concurrentsolver> active # [type: bool, advanced: FALSE, range: {TRUE,FALSE}, default: TRUE] table/concurrentsolver/active = TRUE