Tensorflow 2 ValueError: Shapes (20, 1) and (20, 2) are incompatible in gym environment

Submitted by 心不动则不痛 on 2021-01-28 05:34:43

Question


Just for learning, I wanted to test this code, but there is a problem in it that I do not understand. It raises ValueError: Shapes (20, 1) and (20, 2) are incompatible on the line loss = network.train_on_batch(states, discounted_rewards).
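
The mismatch is easy to reproduce outside of gym: categorical_crossentropy compares the network's (batch, num_actions) softmax output against a target of the same shape, while discounted_rewards carries only one scalar per sample. Here is a minimal standalone reproduction I put together just to isolate the shape problem (my own sketch, not part of the original code):

import numpy as np
from tensorflow import keras

net = keras.Sequential([keras.layers.Dense(2, activation='softmax')])
net.compile(loss='categorical_crossentropy', optimizer='adam')

x = np.zeros((20, 4))     # 20 states, 4 features each
y = np.zeros((20,))       # one scalar per sample, like discounted_rewards
net.train_on_batch(x, y)  # ValueError: Shapes (20, 1) and (20, 2) are incompatible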

Maybe there is something new in TensorFlow that was not there when this code was written.

The code is from this website: https://adventuresinmachinelearning.com/policy-gradient-tensorflow-2/

import gym
import tensorflow as tf
from tensorflow import keras
import numpy as np
import datetime as dt

#STORE_PATH = '/Users/andrewthomas/Adventures in ML/TensorFlowBook/TensorBoard/PolicyGradientCartPole'
GAMMA = 0.95

env = gym.make("CartPole-v0")
state_size = 4
num_actions = env.action_space.n

tf.keras.backend.set_floatx('float64')
network = keras.Sequential([
    keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
    keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
    keras.layers.Dense(num_actions, activation='softmax')
])
network.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam())
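# note: categorical_crossentropy expects targets shaped (batch, num_actions),
# matching the softmax output of the final Dense layer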

def get_action(network, state, num_actions):
    print(state.reshape((1, -1)))
    softmax_out = network(state.reshape((1, -1)))
    print(softmax_out)
    selected_action = np.random.choice(num_actions, p=softmax_out.numpy()[0])
    return selected_action


def update_network(network, rewards, states, actions, num_actions):
    reward_sum = 0
    discounted_rewards = []
    for reward in rewards[::-1]:  # reverse buffer r
        reward_sum = reward + GAMMA * reward_sum
        discounted_rewards.append(reward_sum)
    discounted_rewards.reverse()
    discounted_rewards = np.array(discounted_rewards)
    # standardise the rewards
    discounted_rewards -= np.mean(discounted_rewards)
    discounted_rewards /= np.std(discounted_rewards)
    states = np.vstack(states)

    print("States", states.shape)
    print(states)
    print("Rewards", discounted_rewards.shape)
    print(discounted_rewards)
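    # this is the line that raises:
    # ValueError: Shapes (20, 1) and (20, 2) are incompatible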
    loss = network.train_on_batch(states, discounted_rewards)
    return loss


num_episodes = 10000000
#train_writer = tf.summary.create_file_writer(STORE_PATH + f"/PGCartPole_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
for episode in range(num_episodes):
    state = env.reset()
    rewards = []
    states = []
    actions = []
    while True:
        action = get_action(network, state, num_actions)
        new_state, reward, done, _ = env.step(action)
        states.append(state)
        rewards.append(reward)
        actions.append(action)

        if done:
            loss = update_network(network, rewards, states, actions, num_actions)
            tot_reward = sum(rewards)
            print(f"Episode: {episode}, Reward: {tot_reward}, avg loss: {loss:.5f}")
            with train_writer.as_default():
                tf.summary.scalar('reward', tot_reward, step=episode)
                tf.summary.scalar('avg loss', loss, step=episode)
            break

        state = new_state
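
From the error it looks like train_on_batch needs a (20, 2) target to match the softmax output, not the (20,) vector of discounted returns. One idea (a sketch of the usual REINFORCE cross-entropy trick, not verified against the original blog post) is to use the one-hot encoded actions as targets and pass the discounted returns as sample weights, so each sample's cross-entropy is scaled by its return:

def update_network(network, rewards, states, actions, num_actions):
    reward_sum = 0
    discounted_rewards = []
    for reward in rewards[::-1]:  # reverse buffer r
        reward_sum = reward + GAMMA * reward_sum
        discounted_rewards.append(reward_sum)
    discounted_rewards.reverse()
    discounted_rewards = np.array(discounted_rewards)
    # standardise the rewards
    discounted_rewards -= np.mean(discounted_rewards)
    discounted_rewards /= np.std(discounted_rewards)
    states = np.vstack(states)
    # one-hot encode the chosen actions: shape (batch, num_actions)
    targets = np.eye(num_actions)[np.array(actions)]
    # weight each sample's cross-entropy by its discounted return
    loss = network.train_on_batch(states, targets, sample_weight=discounted_rewards)
    return loss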

Source: https://stackoverflow.com/questions/61932773/tensorflow-2-valueerror-shapes-20-1-and-20-2-are-incompatible-in-gym-envi
