# This was a plan for a "case study" file. The actual contents are
# just unimplemented ideas as of yet! The actual grammar to be used is
# JSON, as in our earlier Hadoop experiments, since it is so easy to
# parse using Boost. So, these remain only as an idea roadmap.

# Sections are marked up as in INI files. Fields are not, though!

[Case]

# Field names are to end with ':' and be separated from the value by whitespace.
# One line for each field. One value for each name.
# Any whitespace within the value is interpreted as a single ' '.

# The 'tag:' field identifies the case study for LaTeX documentation etc.
tag: sample1

# The 'brief:' field should contain a brief description of the case.
brief: A first example

# The 'case_type:' field determines the learning task. As of now, only
# 'classification' is supported.
case_type: classification

[Dataset]
file: iris.train
# generate: 2dvshape -- maybe also like this?

# Population size parameters.
[Population]
pop_size: 100

# Constraints (maximum layer sizes) on the MLP architecture.
[MLP_constraints]
# Input and output sizes should be set according to the data dimensions ("*"):
max_size: *-3-4-*
# Except that we probably want to do feature extraction somehow:
#max_size: fixed-3-4-*
#max_size: featuresel-3-4-*

# How to initialize the MLPs of the first population.
[MLP_init]
init_size: *-3-4-*
init_weights: uniform(-1,1)

# Objective functions, aggregation, and algorithm(s) used in
# local improvement.
[MLP_improvement]
improvement_obj: mse
improvement_alg: backprop/.8 prunesmall/.2
improvement_steps: 10
improvement_regc: adaptive
# Only ideas as of now: 'classwise_random_mse'

# Objective functions used in the global Pareto search.
[MLP_evolution]
evolution_obj: classwise_misprediction number_neurons
evolution_alg: weight_jog/adaptive weight_del/.2
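
# --- Idea sketch (not part of the grammar above) ---------------------------
# If the case file is eventually written in the JSON grammar mentioned at
# the top, loading it with Boost could look roughly like the C++ below.
# The file name, the JSON layout, and the key paths just mirror the fields
# in this roadmap; they are assumptions, not a fixed schema.
#
#   #include <iostream>
#   #include <string>
#   #include <boost/property_tree/ptree.hpp>
#   #include <boost/property_tree/json_parser.hpp>
#
#   int main() {
#       // Hypothetical JSON rendering of this case, e.g. case1.json:
#       // { "Case": { "tag": "sample1", "case_type": "classification" },
#       //   "Dataset": { "file": "iris.train" },
#       //   "Population": { "pop_size": 100 } }
#       boost::property_tree::ptree pt;
#       boost::property_tree::read_json("case1.json", pt);
#
#       // Fields are reached by dotted paths into the tree.
#       std::string tag  = pt.get<std::string>("Case.tag");
#       std::string task = pt.get<std::string>("Case.case_type");
#       int pop_size     = pt.get<int>("Population.pop_size");
#
#       std::cout << tag << " (" << task << "), pop_size = "
#                 << pop_size << "\n";
#       return 0;
#   }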