I added saving of the neural network. When I use Train() inside the while task.wait() loop, it works perfectly fine. But when I use Run() instead (the loaded neural network is supposed to have already been trained), the AI makes significantly different decisions, such as a high output value when the lava is far away and a low value when the lava is close. This only happens when I use Run(). I'm not sure whether I saved/loaded the network incorrectly.
local DataStoreService = game:GetService("DataStoreService")
local ServerScriptService = game:GetService("ServerScriptService")
-- Create/Get our datastore called "Networks"
local NetworkDataStore = DataStoreService:GetDataStore("Networks")
local OpenML = require(ServerScriptService.OpenML)

local States = {"Distance"}
local Actions = {"Jump"} -- Removed Idle state to keep it simple: either jump or don't jump.

local Propagator = OpenML.Algorithms.Propagator

-- DataStore requests can throw (service outage, or Studio without API access
-- enabled), so protect the load with pcall; on failure we simply start fresh.
local loadOk, MyNetwork = pcall(function()
	return NetworkDataStore:GetAsync("MyNetwork")
end)
if not loadOk then
	warn("Failed to load saved network, starting a new one: " .. tostring(MyNetwork))
	MyNetwork = nil
end

local NeuralNetwork
if MyNetwork then
	-- Saved network found: decode it back into a weight/bias table.
	NeuralNetwork = OpenML.Resources.MLP.Decompress(MyNetwork, "ASCII")
	print(NeuralNetwork)
else
	-- No save yet: build { #States, 10, #Actions } MLP. Reduced hidden layer
	-- count to 1 since only a simple action is needed.
	NeuralNetwork = OpenML.Resources.MLP.new({ #States, 10, #Actions }, function()
		return math.random() * 3 - 1.5 -- symmetric init around 0 (allows negative weights; produces better output)
	end)
	print("NEW NETWORK")
end

-- Decompress returns a plain data table; reattach the propagation methods.
setmetatable(NeuralNetwork, { __index = Propagator })

local ActivationFunction = OpenML.ActivationFunctions.TanH -- Changed activation from ReLU to TanH
-- ReLU suffers from "dying ReLU", so we changed it to TanH

local DQL = OpenML.Algorithms.DQL.new()
DQL.OnForwardPropagation = function(states)
	return NeuralNetwork:ForwardPropagation(states, ActivationFunction)
end
DQL.OnBackPropagation = function(activations, target)
	-- changed the learning rate to 0.01
	return NeuralNetwork:BackPropagation(activations, target, {
		ActivationFunction = ActivationFunction,
		LearningRate = 0.01,
	})
end
--- Runs one inference step: reads the current distance to the lava part,
--- forward-propagates it through the network, and jumps when the network's
--- output exceeds the 0.5 threshold. Returns the state table used as input.
function Run()
	local toLava = script.Parent:GetPivot().Position - workspace.Part.Position
	local state = { toLava.Magnitude }

	-- Forward pass; the final layer's activations are the action values.
	local activations = NeuralNetwork:ForwardPropagation(state, ActivationFunction)
	local output = activations[#activations]
	local jumpValue = output[1]

	-- Anything above the 0.5 threshold triggers a jump; below it does nothing.
	if jumpValue > 0.5 then
		script.Parent.Humanoid.Jump = true
	end

	--print(toLava.Magnitude)
	print("Jump: " .. jumpValue)
	return state
end
-- Performs one training step: runs inference (which may make the character
-- jump), then feeds the observed state and a distance-based reward to DQL.
function Train()
	local state = Run()
	local distance = state[1]
	-- NOTE(review): the reward is credited to Action 1 on every step,
	-- regardless of whether Run() actually triggered a jump this frame —
	-- confirm that is the intent for OpenML's DQL:Learn contract.
	-- NOTE(review): no NextState is passed here; verify OpenML.DQL handles a
	-- missing next state (otherwise the Q-target may be degenerate).
	DQL:Learn{
		State = state,
		Action = 1, -- Jump
		Reward = distance < 7 and 1 or -1, -- changed distance from 5 to 7 so it jumps sooner
	}
end
-- Persist the (trained) network when the server shuts down. SetAsync can
-- throw (DataStore outage / Studio without API access), and an unprotected
-- error here would silently lose the trained weights, so guard with pcall.
game:BindToClose(function()
	warn("Game is shutting down.")
	local CompressedASCII = OpenML.Resources.MLP.Compress(NeuralNetwork, "ASCII")
	local ok, err = pcall(function()
		NetworkDataStore:SetAsync("MyNetwork", CompressedASCII)
	end)
	if ok then
		warn("saved")
	else
		warn("Failed to save network: " .. tostring(err))
	end
	-- Add your custom logic here
end)
-- Main loop: step the agent once per heartbeat (changed wait time from 0.1
-- to the server framerate). Swap Run() for Train() to keep learning.
while true do
	task.wait()
	Run()
end