Lewis Signaling Game for PettingZoo

Paper Review

review
compositionality
neural networks
signaling systems
language evolution
Author

Oren Bochman

Published

Wednesday, January 1, 2025

Keywords

compositionality, naive compositionality, language emergence, deep learning, neural networks, signaling systems, emergent languages, topographic similarity, positional disentanglement, bag-of-symbols disentanglement, information gap disentanglement

from gymnasium import spaces
import numpy as np
from pettingzoo.utils.env import AECEnv
from pettingzoo.utils import wrappers
from pettingzoo.utils.agent_selector import agent_selector

class Sender:
    """Baseline sender: ignores the observed state and picks a random signal."""
    def __init__(self, num_signals):
        self.num_signals = num_signals
        self.action_space = spaces.Discrete(num_signals)

    def act(self, observation):
        return self.action_space.sample()

class Receiver:
    """Baseline receiver: echoes the signal as its guess, or guesses at random
    before any signal has been sent."""
    def __init__(self, num_states, num_signals):
        self.num_states = num_states
        self.num_signals = num_signals
        self.action_space = spaces.Discrete(num_states)

    def act(self, observation):
        # num_signals is the "no signal yet" sentinel produced by observe()
        if observation == self.num_signals:
            return self.action_space.sample()
        else:
            # map signal i to state i (mod num_states keeps the guess in range)
            return observation % self.num_states

class LewisSignalingEnv(AECEnv):
    metadata = {"render_modes": ["human"], "name": "lewis_signaling_v0"}

    def __init__(self, num_signals=3, num_states=3, max_cycles=100,debug=False):
        super().__init__()
        self.debug = debug
        self.possible_agents = ["sender", "receiver"]
        self.agent_name_mapping = dict(zip(self.possible_agents, list(range(len(self.possible_agents)))))
        self.num_signals = num_signals
        self.num_states = num_states
        self.max_cycles = max_cycles
        self.sender = Sender(num_signals)
        self.receiver = Receiver(num_states, num_signals)
        self.state = None
        self.signal = None
        self.cycles = 0

        self.observation_spaces = {
            # the sender observes the state of the world it must describe
            "sender": spaces.Discrete(self.num_states),
            # the receiver observes the signal, plus a "no signal yet" sentinel
            "receiver": spaces.Discrete(self.num_signals + 1)
        }
        self.action_spaces = {
            "sender": self.sender.action_space,
            "receiver": self.receiver.action_space,
        }

    def observation_space(self, agent):
        return self.observation_spaces[agent]

    def action_space(self, agent):
        return self.action_spaces[agent]

    def observe(self, agent):
        if agent == "sender":
            # the sender sees the current state of the world
            return self.state
        elif agent == "receiver":
            # the receiver sees the last signal, or the sentinel value
            # num_signals if no signal has been sent yet
            if self.signal is None:
                return self.num_signals
            else:
                return self.signal
        else:
            raise ValueError(f"Unknown agent: {agent}")

    def reset(self, seed=None, options=None):
        self.agents = self.possible_agents[:]
        self.rewards = {agent: 0 for agent in self.agents}
        self._cumulative_rewards = {agent: 0 for agent in self.agents}
        self.terminations = {agent: False for agent in self.agents}
        self.truncations = {agent: False for agent in self.agents}
        self.infos = {agent: {} for agent in self.agents}
        self.state = np.random.randint(self.num_states)
        self.signal = None
        self.cycles = 0
        self._agent_selector = agent_selector(self.agents)
        self.agent_selection = self._agent_selector.next()
        self._clear_rewards() # Clear rewards in reset
        return {agent: self.observe(agent) for agent in self.agents}


    def step(self, action):
        # ignore steps once the current agent is terminated or truncated
        if (self.terminations[self.agent_selection]
                or self.truncations[self.agent_selection]):
            return

        current_agent = self.agent_selection

        if current_agent == "sender":

            self.signal = action
            if self.debug:
                print(f"Sender sent signal: {self.signal}, State: {self.state}")
            self.agent_selection = self._agent_selector.next()
            return

        elif current_agent == "receiver":
            reward = 0
            guess = action
            if self.debug:
                print(f"Receiver guessed: {guess}, State: {self.state}, Signal: {self.signal}")
            if guess == self.state:
                reward = 1
                if self.debug:
                    print(f"Reward assigned: {reward}")                
            else:
                reward = 0

            for agent in self.agents:
                self.rewards[agent] = reward
                self._cumulative_rewards[agent] += self.rewards[agent]

            if self._agent_selector.is_last():
                self.cycles += 1
                if self.cycles >= self.max_cycles:
                    for agent in self.agents:
                        self.truncations[agent] = True
                # start a new round: draw a fresh state, clear the signal,
                # and hand the turn back to the sender
                self.state = np.random.randint(self.num_states)
                self.signal = None
                self._agent_selector.reinit(self.agents)
                self.agent_selection = self._agent_selector.next()
            else:
                self.agent_selection = self._agent_selector.next()


    def _clear_rewards(self):
        # passthrough kept as a hook for debugging reward bookkeeping
        super()._clear_rewards()

    def close(self):
        if hasattr(self, "_agent_selector"):
            del self._agent_selector

def env(**kwargs):
    env = LewisSignalingEnv(**kwargs)
    # standard PettingZoo wrapper stack: check that discrete actions are in
    # bounds, then enforce the reset/step call order of the AEC API
    env = wrappers.AssertOutOfBoundsWrapper(env)
    env = wrappers.OrderEnforcingWrapper(env)
    return env

# --- Main execution in the notebook ---
num_episodes = 10
mean_rewards = {"sender": 0, "receiver": 0}

env_instance = env(num_signals=3, num_states=3, max_cycles=10) # Reduced max cycles for faster testing

for episode in range(num_episodes):
    env_instance.reset()
    unwrapped_env = env_instance.unwrapped
    print(f"Starting episode {episode+1}, New State: {unwrapped_env.state}")
    for agent in env_instance.agent_iter():
        observation, reward, termination, truncation, info = env_instance.last()
        if termination or truncation:
            break
        if agent == "sender":
            action = unwrapped_env.sender.act(observation)
        elif agent == "receiver":
            action = unwrapped_env.receiver.act(observation)
        env_instance.step(action)

    # Record the reward of the final step of each episode (1 if the last guess was correct):
    for agent in env_instance.possible_agents:
        mean_rewards[agent] += unwrapped_env.rewards[agent]

for agent in env_instance.possible_agents:
    mean_rewards[agent] /= num_episodes
print(f"Mean rewards over {num_episodes} episodes:")
print(f"Sender: {mean_rewards['sender']}")
print(f"Receiver: {mean_rewards['receiver']}")
Starting episode 1, New State: 2
Starting episode 2, New State: 0
Starting episode 3, New State: 0
Starting episode 4, New State: 0
Starting episode 5, New State: 2
Starting episode 6, New State: 2
Starting episode 7, New State: 1
Starting episode 8, New State: 1
Starting episode 9, New State: 0
Starting episode 10, New State: 2
Mean rewards over 10 episodes:
Sender: 0.2
Receiver: 0.2

The above is a basic version of the Lewis signaling game implemented in PettingZoo. The game has a sender, which observes the state of the world and emits a signal, and a receiver (in general, one or more receivers), which observes only the signal and must guess the state; both agents are rewarded when the guess is correct.

What would be nice is to:

  1. have agents that learn via various algorithms (the Roth–Erev and Bush–Mosteller rules are sketched after this list)

    1. Hammerstein
    2. Roth–Erev reinforcement (has a Goldilocks property); similar to a softmax policy with a linear preference:

    \begin{align}
    h'(a) & \leftarrow \alpha h(a) + \mathbb{1}_{a\ taken}\, r \\
    \pi(a) & \leftarrow \frac{e^{h(a)/\tau}}{\sum_{a'} e^{h(a')/\tau}}
    \end{align}

    Note: I re-interpreted the attraction update A as the preference h, and the forgetting/recency parameter \psi as a learning rate \alpha, since both feed into a softmax, which in policy-gradient methods is parameterized by a preference.

    3. Bush–Mosteller reinforcement; similar to policy gradient with a linear reward function:

    \pi'(a) \leftarrow \pi(a) + \alpha\left[\mathbb{1}_{a\ taken}\, R - \pi(a)\right]

    4. Bochman fastest coordination
    5. Bochman belief-based coordination
    6. Bochman adaptive Huffman coding coordination
    7. Bochman adaptive arithmetic coding coordination
    8. Tabular Monte Carlo RL
    9. Policy Gradient or Gradient Bandit
  2. expected return metrics for the signaling system (see the metrics sketch after this list)

  3. entropy metrics for the signaling system (also covered by the metrics sketch after this list)

  4. topographic similarity metrics for the signaling system (see the sketch after this list)

  5. positional disentanglement metrics for the signaling system

  6. bag-of-symbols disentanglement metrics for the signaling system

  7. learning rate per cycle

  8. learning rate per state space size

  9. state space generators + distributions over states:

    1. simple -
    2. structured - group action for feature morphology
    3. structured and simple (generate atomic states, then combinations)
    4. trees - atoms and trees of atoms based on a one rule grammar.
    5. problem space - states and actions from an MDP.
  10. have multiple receivers that share information to speed up learning

  11. support for injecting errors in communication (see the noisy-channel sketch after this list)

  12. support for injecting risks into communication

  13. support for different signal aggregation functions:

    1. bag of symbols
    2. sequence of symbols
    3. symbol parse trees ??
    4. DAGs ????
    5. custom - user defined
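Two of the learning rules from item 1 are simple enough to sketch directly against the environment above. This is a minimal sketch rather than a tuned implementation: the class names (RothErevAgent, BushMostellerAgent), the tabular (observation, action) layout, and the update() hook are my own conventions; only the update rules themselves follow the formulas quoted in the list.

import numpy as np

class RothErevAgent:
    """Roth–Erev reinforcement: softmax over preferences h(obs, a);
    preferences decay by alpha and the taken action is bumped by the reward."""
    def __init__(self, num_observations, num_actions, alpha=0.95, tau=0.5, rng=None):
        self.h = np.zeros((num_observations, num_actions))
        self.alpha = alpha              # forgetting/recency parameter
        self.tau = tau                  # softmax temperature
        self.rng = rng or np.random.default_rng()

    def policy(self, observation):
        z = self.h[observation] / self.tau
        z -= z.max()                    # numerical stability
        p = np.exp(z)
        return p / p.sum()

    def act(self, observation):
        return int(self.rng.choice(self.h.shape[1], p=self.policy(observation)))

    def update(self, observation, action, reward):
        self.h[observation] *= self.alpha       # decay all preferences
        self.h[observation, action] += reward   # reinforce the taken action

class BushMostellerAgent:
    """Bush–Mosteller reinforcement: move the action probabilities a step of
    size alpha toward the indicator of the taken action, scaled by the reward."""
    def __init__(self, num_observations, num_actions, alpha=0.1, rng=None):
        self.pi = np.full((num_observations, num_actions), 1.0 / num_actions)
        self.alpha = alpha
        self.rng = rng or np.random.default_rng()

    def act(self, observation):
        return int(self.rng.choice(self.pi.shape[1], p=self.pi[observation]))

    def update(self, observation, action, reward):
        target = np.zeros(self.pi.shape[1])
        target[action] = 1.0
        # pi'(a) = pi(a) + alpha * [1_{a taken} * R - pi(a)], then renormalise
        self.pi[observation] += self.alpha * (target * reward - self.pi[observation])
        self.pi[observation] = np.clip(self.pi[observation], 1e-8, None)
        self.pi[observation] /= self.pi[observation].sum()

To plug these in, the sender would learn a map from states to signals and the receiver a map from signals to states: call act() where the loop above calls sender.act / receiver.act, and call update() with the reward that env_instance.last() reports on each agent's next turn.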
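For items 2 and 3, the natural first metrics are the per-cycle success rate (an estimate of expected return) and the mutual information between states and signals, which measures how much of the state the emergent code actually carries. A minimal sketch, assuming rollouts are logged as parallel lists of states, signals, and rewards (the function names are mine):

from collections import Counter
import numpy as np

def expected_return(rewards):
    """Mean per-cycle reward: 1.0 is a perfect signaling system, 1/num_states is chance."""
    return float(np.mean(rewards))

def mutual_information(states, signals):
    """I(state; signal) in bits, from empirical counts.
    0 bits: signals carry no information; log2(num_states): a perfect code."""
    n = len(states)
    p_s = Counter(states)
    p_m = Counter(signals)
    p_sm = Counter(zip(states, signals))
    mi = 0.0
    for (s, m), c in p_sm.items():
        p_joint = c / n
        mi += p_joint * np.log2(p_joint / ((p_s[s] / n) * (p_m[m] / n)))
    return mi

# toy check: a one-to-one mapping of 3 states to 3 signals carries log2(3) ≈ 1.58 bits
states = [0, 1, 2, 0, 1, 2]
signals = [2, 0, 1, 2, 0, 1]
print(expected_return([1, 1, 1, 1, 1, 1]), mutual_information(states, signals))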
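For item 4, topographic similarity is commonly computed as the Spearman correlation between pairwise distances in the state (meaning) space and pairwise distances in the message space. A minimal sketch using scipy.stats.spearmanr; it assumes states and messages are equal-length tuples so a Hamming-style distance applies, and both distance functions can be swapped for domain-specific ones:

from itertools import combinations
import numpy as np
from scipy.stats import spearmanr

def topographic_similarity(states, messages, state_dist=None, msg_dist=None):
    """Spearman correlation between pairwise state and message distances."""
    if state_dist is None:
        state_dist = lambda a, b: float(np.sum(np.asarray(a) != np.asarray(b)))  # Hamming
    if msg_dist is None:
        msg_dist = lambda a, b: float(np.sum(np.asarray(a) != np.asarray(b)))    # Hamming
    d_states, d_msgs = [], []
    for i, j in combinations(range(len(states)), 2):
        d_states.append(state_dist(states[i], states[j]))
        d_msgs.append(msg_dist(messages[i], messages[j]))
    rho, _ = spearmanr(d_states, d_msgs)
    return rho

# toy check: a perfectly compositional mapping gives rho == 1.0
states = [(0, 0), (0, 1), (1, 0), (1, 1)]
messages = [(0, 0), (0, 1), (1, 0), (1, 1)]
print(topographic_similarity(states, messages))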
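For item 11, one low-friction way to inject communication errors is to corrupt the signal on its way from sender to receiver. The subclass below is a sketch, not part of the environment above; the class name and the noise_level parameter are hypothetical.

class NoisyLewisSignalingEnv(LewisSignalingEnv):
    """Lewis signaling game in which each signal is replaced by a uniformly
    random one with probability noise_level."""
    def __init__(self, noise_level=0.1, **kwargs):
        super().__init__(**kwargs)
        self.noise_level = noise_level

    def step(self, action):
        if self.agent_selection == "sender" and np.random.random() < self.noise_level:
            # the channel corrupts the signal before it is stored
            action = np.random.randint(self.num_signals)
        super().step(action)

# usage: noisy_env = NoisyLewisSignalingEnv(num_signals=3, num_states=3, noise_level=0.2)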

Citation

BibTeX citation:
@online{bochman2025,
  author = {Bochman, Oren},
  title = {Lewis {Signaling} {Game} for {PettingZoo}},
  date = {2025-01-01},
  url = {https://orenbochman.github.io/posts/2024/2024-10-10-marco-baoni-composionality/lewis-signaling-game-petting-zoo.html},
  langid = {en}
}
For attribution, please cite this work as:
Bochman, Oren. 2025. “Lewis Signaling Game for PettingZoo.” January 1, 2025. https://orenbochman.github.io/posts/2024/2024-10-10-marco-baoni-composionality/lewis-signaling-game-petting-zoo.html.