Skip to content

Commit

Permalink
bug fixes
Browse files Browse the repository at this point in the history
The reported error was an "index out of range" panic on one of the slices. It occurred because the epoch counter i in the Train function was used to index inputs and targets directly, which goes out of bounds whenever the number of iterations exceeds the number of training examples.
  • Loading branch information
simplyYan authored Jun 12, 2024
1 parent 5f067df commit 875d2d3
Showing 1 changed file with 49 additions and 44 deletions.
93 changes: 49 additions & 44 deletions src/GalaktaGlareNN/GalaktaGlareNN.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,65 +49,70 @@ func sigmoidDerivative(x float64) float64 {

func (nn *NeuralNetwork) Train(inputs, targets [][]float64, iterations int, learningRate float64) {
for i := 0; i < iterations; i++ {

hiddenLayer := make([]float64, nn.hiddenNeurons)
for j := 0; j < nn.hiddenNeurons; j++ {
var sum float64
for k := 0; k < nn.inputNeurons; k++ {
sum += inputs[i][k] * nn.weightsInput[k][j]
for n := 0; n < len(inputs); n++ {
// Feedforward process
hiddenLayer := make([]float64, nn.hiddenNeurons)
for j := 0; j < nn.hiddenNeurons; j++ {
var sum float64
for k := 0; k < nn.inputNeurons; k++ {
sum += inputs[n][k] * nn.weightsInput[k][j]
}
hiddenLayer[j] = sigmoid(sum)
}
hiddenLayer[j] = sigmoid(sum)
}

outputLayer := make([]float64, nn.outputNeurons)
for j := 0; j < nn.outputNeurons; j++ {
var sum float64
for k := 0; k < nn.hiddenNeurons; k++ {
sum += hiddenLayer[k] * nn.weightsOutput[k][j]
outputLayer := make([]float64, nn.outputNeurons)
for j := 0; j < nn.outputNeurons; j++ {
var sum float64
for k := 0; k < nn.hiddenNeurons; k++ {
sum += hiddenLayer[k] * nn.weightsOutput[k][j]
}
outputLayer[j] = sigmoid(sum)
}
outputLayer[j] = sigmoid(sum)
}

outputErrors := make([]float64, nn.outputNeurons)
for j := 0; j < nn.outputNeurons; j++ {
outputErrors[j] = targets[i][j] - outputLayer[j]
}
// Backpropagation process
outputErrors := make([]float64, nn.outputNeurons)
for j := 0; j < nn.outputNeurons; j++ {
outputErrors[j] = targets[n][j] - outputLayer[j]
}

outputGradients := make([]float64, nn.outputNeurons)
for j := 0; j < nn.outputNeurons; j++ {
outputGradients[j] = outputErrors[j] * sigmoidDerivative(outputLayer[j])
}
outputGradients := make([]float64, nn.outputNeurons)
for j := 0; j < nn.outputNeurons; j++ {
outputGradients[j] = outputErrors[j] * sigmoidDerivative(outputLayer[j])
}

hiddenErrors := make([]float64, nn.hiddenNeurons)
for j := 0; j < nn.hiddenNeurons; j++ {
var error float64
for k := 0; k < nn.outputNeurons; k++ {
error += outputGradients[k] * nn.weightsOutput[j][k]
hiddenErrors := make([]float64, nn.hiddenNeurons)
for j := 0; j < nn.hiddenNeurons; j++ {
var error float64
for k := 0; k < nn.outputNeurons; k++ {
error += outputGradients[k] * nn.weightsOutput[j][k]
}
hiddenErrors[j] = error
}
hiddenErrors[j] = error
}

hiddenGradients := make([]float64, nn.hiddenNeurons)
for j := 0; j < nn.hiddenNeurons; j++ {
hiddenGradients[j] = hiddenErrors[j] * sigmoidDerivative(hiddenLayer[j])
}
hiddenGradients := make([]float64, nn.hiddenNeurons)
for j := 0; j < nn.hiddenNeurons; j++ {
hiddenGradients[j] = hiddenErrors[j] * sigmoidDerivative(hiddenLayer[j])
}

for j := 0; j < nn.hiddenNeurons; j++ {
for k := 0; k < nn.outputNeurons; k++ {
change := outputGradients[k] * hiddenLayer[j] * learningRate
nn.weightsOutput[j][k] += change
// Update weights
for j := 0; j < nn.hiddenNeurons; j++ {
for k := 0; k < nn.outputNeurons; k++ {
change := outputGradients[k] * hiddenLayer[j] * learningRate
nn.weightsOutput[j][k] += change
}
}
}

for j := 0; j < nn.inputNeurons; j++ {
for k := 0; k < nn.hiddenNeurons; k++ {
change := hiddenGradients[k] * inputs[i][j] * learningRate
nn.weightsInput[j][k] += change
for j := 0; j < nn.inputNeurons; j++ {
for k := 0; k < nn.hiddenNeurons; k++ {
change := hiddenGradients[k] * inputs[n][j] * learningRate
nn.weightsInput[j][k] += change
}
}
}
}
}


func (nn *NeuralNetwork) Predict(input []float64) []float64 {
hiddenLayer := make([]float64, nn.hiddenNeurons)
for j := 0; j < nn.hiddenNeurons; j++ {
Expand All @@ -128,4 +133,4 @@ func (nn *NeuralNetwork) Predict(input []float64) []float64 {
}

return outputLayer
}
}

0 comments on commit 875d2d3

Please sign in to comment.