13 #ifndef MLPACK_METHODS_RL_SAC_HPP 14 #define MLPACK_METHODS_RL_SAC_HPP 17 #include <ensmallen.hpp> 58 typename EnvironmentType,
59 typename QNetworkType,
60 typename PolicyNetworkType,
62 typename ReplayType = RandomReplay<EnvironmentType>
90 QNetworkType& learningQ1Network,
91 PolicyNetworkType& policyNetwork,
92 ReplayType& replayMethod,
93 UpdaterType qNetworkUpdater = UpdaterType(),
94 UpdaterType policyNetworkUpdater = UpdaterType(),
95 EnvironmentType environment = EnvironmentType());
150 QNetworkType& learningQ1Network;
151 QNetworkType learningQ2Network;
154 QNetworkType targetQ1Network;
155 QNetworkType targetQ2Network;
158 PolicyNetworkType& policyNetwork;
161 ReplayType& replayMethod;
164 UpdaterType qNetworkUpdater;
165 #if ENS_VERSION_MAJOR >= 2 166 typename UpdaterType::template Policy<arma::mat, arma::mat>*
167 qNetworkUpdatePolicy;
171 UpdaterType policyNetworkUpdater;
172 #if ENS_VERSION_MAJOR >= 2 173 typename UpdaterType::template Policy<arma::mat, arma::mat>*
174 policyNetworkUpdatePolicy;
178 EnvironmentType environment;
200 #include "sac_impl.hpp"
typename EnvironmentType::Action ActionType
Convenient typedef for action.
void SelectAction()
Select an action, given an agent.
Linear algebra utility functions, generally performed on matrices or vectors.
Implementation of Soft Actor-Critic, a model-free off-policy actor-critic based deep reinforcement learning algorithm.
The core includes that mlpack expects; standard C++ includes and Armadillo.
double Episode()
Execute an episode.
SAC(TrainingConfig &config, QNetworkType &learningQ1Network, PolicyNetworkType &policyNetwork, ReplayType &replayMethod, UpdaterType qNetworkUpdater=UpdaterType(), UpdaterType policyNetworkUpdater=UpdaterType(), EnvironmentType environment=EnvironmentType())
Create the SAC object with given settings.
void Update()
Update the Q and policy networks.
const StateType & State() const
Get the state of the agent.
size_t & TotalSteps()
Modify total steps from beginning.
void SoftUpdate(double rho)
Softly update the learning Q network parameters to the target Q network parameters.
bool & Deterministic()
Modify the training mode / test mode indicator.
const size_t & TotalSteps() const
Get total steps from beginning.
The mean squared error performance function measures the network's performance according to the mean of squared errors between its output and the target values.
const ActionType & Action() const
Get the action of the agent.
StateType & State()
Modify the state of the agent.
const bool & Deterministic() const
Get the indicator of training mode / test mode.
typename EnvironmentType::State StateType
Convenient typedef for state.