brtzsnr wrote:Base is the score without any adjustments. It doesn't make sense to change it because you want to know when a better set of parameters was found.
For the other thing: NormFloat64 produces both positive and negative numbers; it follows a Gaussian distribution with mean 0 and standard deviation 1. Nevertheless I'll try with your code. Thanks.
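For illustration, here is a minimal, self-contained sketch of the kind of mutation rand.NormFloat64 gives: symmetric Gaussian noise around the current value. The mutate helper, the sigma scale, and the clamping to [min, max] are assumptions for the demo, not the actual txt.go code:
Code: Select all
package main

import (
	"fmt"
	"math/rand"
)

// mutate perturbs v with Gaussian noise (mean 0, standard deviation sigma)
// and clamps the result to [min, max]. NormFloat64 is symmetric around 0,
// so the perturbation goes up or down with equal probability.
func mutate(v, sigma, min, max float64) float64 {
	v += sigma * rand.NormFloat64()
	if v < min {
		v = min
	}
	if v > max {
		v = max
	}
	return v
}

func main() {
	rand.Seed(1) // fixed seed so the demo output is reproducible
	for i := 0; i < 5; i++ {
		fmt.Println(mutate(50, 10, 0, 100))
	}
}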
Hi,
do you plan to further improve upon this?
I noticed that most often only the first parameter is changed while the others remain untouched, and depending on the starting values I very often get a different set of optimized values when repeating the tuning.
Therefore I changed txt.go to loop over all parameters and do an estimation run for each one (hopefully without introducing a bug).
Here is the code:
Code: Select all
// Run does a hill-climbing search to improve on the base configuration.
func Run(format, run string, opts []Option) {
	flag.Parse()
	log.SetFlags(log.Ltime | log.Lshortfile)
	rand.Seed(time.Now().UnixNano())

	var err error
	var flogto *os.File
	if *logto != "" {
		flogto, err = os.Create(*logto)
		if err != nil {
			log.Fatal(err)
		}
		defer flogto.Close()
	}

	// Set up the evaluator and compute the base score that needs to be improved.
	tmpl, err := template.New("option").Parse(format)
	if err != nil {
		log.Fatal(err)
	}
	fit, err := newFitness(tmpl, run, flag.Args())
	if err != nil {
		log.Fatal(err)
	}
	base, err := fit.evaluate("txtrun", nil, nil)
	if err != nil {
		log.Fatal("evaluate: ", err)
	}

	// Start from a single vector one quarter of the way into each parameter's range.
	vals := make([]float64, len(opts))
	for i, o := range opts {
		vals[i] = o.Min + (o.Max-o.Min)/4
	}

	// Do the optimization.
	start := time.Now()
	score := math.Inf(+1)
	test := make([]float64, len(opts))
	temp := make([]float64, len(opts))
	for step := 0; step < *steps; step++ {
		// Sweep over all parameters. For each one, probe three candidates
		// (vals[i]-d, vals[i], vals[i]+d) on the estimation set and keep the best.
		for i, o := range opts {
			// Vary only parameter i; all other parameters keep their current values.
			copy(temp, vals)
			testScore := math.Inf(+1)
			// The mutation step d shrinks as the search progresses.
			d := (o.Max - o.Min) / (10 + float64(step/2))
			if d < 1 {
				d = 1
			}
			for t := -1; t <= 1; t++ {
				temp[i] = o.bound(vals[i] + float64(t)*d)
				score1, err := fit.evaluate("txtest", opts, temp)
				if err != nil {
					log.Fatal(err)
				}
				if score1 < testScore {
					testScore = score1
					copy(test, temp)
				}
			}
			// test holds the best candidate; evaluate it on the full set.
			score1, err := fit.evaluate("txtrun", opts, test)
			if err != nil {
				log.Fatal("evaluate: ", err)
			}
			if score1+1e-8 < score { // Accept the candidate.
				score = score1
				vals[i] = test[i]
				if score < base {
					log.Println("#", step, "new score =", score1, "; base =", base)
					write(os.Stdout, "", tmpl, opts, vals)
				}
			}
			if flogto != nil {
				fmt.Fprintln(flogto, time.Since(start).Seconds(), base, score, score1)
			}
		}
	}
}
This works much better for me, and I get very similar optimized values when repeating the tuning process.
Maybe you want to give this a try as well.