from flexsim_env import FlexSimEnv
from stable_baselines3 import PPO
from stable_baselines3.common.env_checker import check_env

def main():
    print("Initializing FlexSim environment...")

    # Create a FlexSim Gymnasium environment; the paths below must match the local FlexSim installation and model file
    env = FlexSimEnv(
        flexsimPath = "C:/Program Files/FlexSim 2023 Update 2/program/flexsim.exe",
        modelPath = "C:/Users/GAIVOTA_FLEXSIM/Documents/FLEXSIM FELIPE CAPALBO/ESTUDOS ML/Exercicio ML Layout/ExercicioLayout.fsm",
        verbose = False,
        visible = False
        )
    check_env(env)  # Verify that the environment follows the Gymnasium API expected by Stable-Baselines3

    # Train a Stable-Baselines3 PPO model on the environment
    model = PPO("MlpPolicy", env, verbose=1)
    print("Training model...")
    model.learn(total_timesteps=10000)
    
    # save the model
    print("Saving model...")
    model.save("TreinamentoExercicioLayout")

    input("Waiting for input to do some test runs...")
    num_episodes = 10
    episode_rewards = []
    # Run test episodes using the trained model
    for _ in range(num_episodes):
        obs, info = env.reset()
        done = False
        total_reward = 0
        while not done:
            action, _ = model.predict(obs, deterministic=True)  # use the deterministic policy for evaluation
            obs, reward, terminated, truncated, _ = env.step(action)
            done = terminated or truncated  # an episode ends when it terminates or is truncated
            total_reward += reward
        episode_rewards.append(total_reward)
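
    # Summarize the test runs using the collected episode rewards
    mean_reward = sum(episode_rewards) / num_episodes
    print("Episode rewards:", episode_rewards)
    print("Mean reward over {} test episodes: {}".format(num_episodes, mean_reward))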
    env._release_flexsim()
    input("Waiting for input to close FlexSim...")
    env.close()


if __name__ == "__main__":
    main()