gpt2-ppo-training.ipynb
| { | |
| "cells": [ | |
| { | |
| "attachments": {}, | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "# PPO GPT2 to generate controlled sentiment reviews\n", | |
| "Optimise GPT2 to produce IMDB movie reviews with controlled sentiment using a BERT sentiment classifier for rewards.\n" | |
| ] | |
| }, | |
| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "In this notebook we fine-tune GPT2 (small) to generate **controlled** movie reviews based on the IMDB dataset. The model gets the target sentiment and 5 tokens from a real review and is tasked to produce continuations with the targeted sentiment. The reward for the continuations is calculated with the logits of a BERT sentiment classifier. That reward is then used for PPO training." | |
| ] | |
| }, | |
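| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "As a toy illustration of the query layout used later in the training loop (the strings below are made up, not drawn from the dataset): each query is a control token followed by the first 5 tokens of a real review, and the classifier score of the resulting query + continuation becomes the PPO reward." | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "# purely illustrative strings showing how a training query is assembled\n", | |
| "example_query = \"[positive] This film was probably\"  # control token + first 5 review tokens\n", | |
| "example_continuation = \" one of the better dramas I have seen.\"  # what the policy should produce\n", | |
| "print(example_query + example_continuation)" | |
| ] | |
| }, | |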
| { | |
| "cell_type": "code", | |
| "execution_count": 1, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "%load_ext autoreload\n", | |
| "%autoreload 2" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 2, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "import random\n", | |
| "import torch\n", | |
| "import wandb\n", | |
| "import time\n", | |
| "import os\n", | |
| "from tqdm import tqdm\n", | |
| "import numpy as np\n", | |
| "import pandas as pd\n", | |
| "from random import choices\n", | |
| "import matplotlib.pyplot as plt\n", | |
| "\n", | |
| "tqdm.pandas()\n", | |
| "\n", | |
| "from datasets import load_dataset\n", | |
| "\n", | |
| "from transformers import AutoTokenizer, pipeline\n", | |
| "\n", | |
| "from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead, create_reference_model" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 3, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "sentiment_pipe_kwargs = {\"top_k\": None, \"function_to_apply\": \"none\"}\n", | |
| "\n", | |
| "config = PPOConfig(\n", | |
| " model_name=\"lvwerra/gpt2-imdb\", steps=51200, learning_rate=1.41e-5, remove_unused_columns=False, log_with=\"wandb\"\n", | |
| ")\n", | |
| "\n", | |
| "txt_in_len = 5\n", | |
| "txt_out_len = 20\n", | |
| "seed = 1" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 4, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "np.random.seed(seed)" | |
| ] | |
| }, | |
| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "We load the GPT2 model with a value head and the tokenizer. We load the model twice; the first model is optimized while the second model serves as a reference to calculate the KL-divergence from the starting point. This serves as an additional reward signal in the PPO training to make sure the optimized model does not deviate too much from the original language model." | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 5, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "gpt2_model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name)\n", | |
| "gpt2_model_ref = create_reference_model(gpt2_model)\n", | |
| "gpt2_tokenizer = AutoTokenizer.from_pretrained(config.model_name)\n", | |
| "\n", | |
| "gpt2_tokenizer.pad_token = gpt2_tokenizer.eos_token" | |
| ] | |
| }, | |
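| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "A rough sketch of the reward shaping mentioned above. The actual computation happens inside `PPOTrainer`; the function, the `kl_coef` value and the exact shaping below are illustrative assumptions, not TRL internals." | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "# illustrative only: fold a per-token KL estimate against the frozen reference model\n", | |
| "# into the scalar sentiment reward, so the policy is discouraged from drifting too far\n", | |
| "def kl_shaped_rewards(reward, logprobs_policy, logprobs_ref, kl_coef=0.2):\n", | |
| "    kl = logprobs_policy - logprobs_ref  # per-token KL estimate\n", | |
| "    shaped = -kl_coef * kl  # KL penalty on every generated token\n", | |
| "    shaped[-1] = shaped[-1] + reward  # sentiment reward added on the final token\n", | |
| "    return shaped\n", | |
| "\n", | |
| "kl_shaped_rewards(torch.tensor(2.5), torch.randn(20), torch.randn(20))" | |
| ] | |
| }, | |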
| { | |
| "cell_type": "code", | |
| "execution_count": 6, | |
| "metadata": {}, | |
| "outputs": [ | |
| { | |
| "data": { | |
| "text/plain": [ | |
| "Dataset({\n", | |
| " features: ['review', 'sentiment'],\n", | |
| " num_rows: 22578\n", | |
| "})" | |
| ] | |
| }, | |
| "execution_count": 6, | |
| "metadata": {}, | |
| "output_type": "execute_result" | |
| } | |
| ], | |
| "source": [ | |
| "from datasets import load_dataset\n", | |
| "# create the dataset\n", | |
| "#\n", | |
| "dataset = load_dataset(\"imdb\", split=\"train\")\n", | |
| "dataset = dataset.rename_columns({\"text\": \"review\", \"label\": \"sentiment\"})\n", | |
| "# make sure the comments are are at least 500 and trim to 1000\n", | |
| "dataset = dataset.filter(lambda x: len(x[\"review\"]) > 500, batched=False)\n", | |
| "dataset = dataset.map(lambda x: {\"review\": x[\"review\"][:1000]}, batched=False)\n", | |
| "\n", | |
| "dataset" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 7, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "dataset = dataset.map(\n", | |
| " lambda x: {\"input_ids\": gpt2_tokenizer.encode(\" \" + x[\"review\"], return_tensors=\"pt\")[0, :txt_in_len]},\n", | |
| " batched=False,\n", | |
| ")\n", | |
| "dataset = dataset.map(lambda x: {\"query\": gpt2_tokenizer.decode(x[\"input_ids\"])}, batched=False)\n", | |
| "dataset = dataset[:20480]\n", | |
| "\n", | |
| "from datasets import Dataset\n", | |
| "\n", | |
| "dataset = Dataset.from_dict(dataset)\n", | |
| "dataset.set_format(\"pytorch\")" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 8, | |
| "metadata": {}, | |
| "outputs": [ | |
| { | |
| "data": { | |
| "text/plain": [ | |
| "tensor([ 770, 2646, 373, 2192, 7867])" | |
| ] | |
| }, | |
| "execution_count": 8, | |
| "metadata": {}, | |
| "output_type": "execute_result" | |
| } | |
| ], | |
| "source": [ | |
| "dataset[3][\"input_ids\"]" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 9, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "def collator(data):\n", | |
| " return dict((key, [d[key] for d in data]) for key in data[0])" | |
| ] | |
| }, | |
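| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "A quick sanity check of the collator on a hand-made toy batch (the tensors below are arbitrary illustrations, not real dataset rows): it simply transposes a list of example dicts into a dict of lists, which is the batch format used in the training loop." | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "toy_batch = [\n", | |
| "    {\"query\": \"[positive] This film\", \"input_ids\": torch.tensor([58, 24561, 60])},\n", | |
| "    {\"query\": \"[negative] This film\", \"input_ids\": torch.tensor([58, 31591, 60])},\n", | |
| "]\n", | |
| "collator(toy_batch)  # -> {'query': [...], 'input_ids': [tensor(...), tensor(...)]}" | |
| ] | |
| }, | |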
| { | |
| "cell_type": "code", | |
| "execution_count": 10, | |
| "metadata": {}, | |
| "outputs": [ | |
| { | |
| "name": "stderr", | |
| "output_type": "stream", | |
| "text": [ | |
| "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mvincentoh\u001b[0m (\u001b[33mbirs\u001b[0m). Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n" | |
| ] | |
| }, | |
| { | |
| "data": { | |
| "text/html": [ | |
| "Tracking run with wandb version 0.16.6" | |
| ], | |
| "text/plain": [ | |
| "<IPython.core.display.HTML object>" | |
| ] | |
| }, | |
| "metadata": {}, | |
| "output_type": "display_data" | |
| }, | |
| { | |
| "data": { | |
| "text/html": [ | |
| "Run data is saved locally in <code>/home/user/Downloads/trl/examples/notebooks/wandb/</code>" | |
| ], | |
| "text/plain": [ | |
| "<IPython.core.display.HTML object>" | |
| ] | |
| }, | |
| "metadata": {}, | |
| "output_type": "display_data" | |
| }, | |
| { | |
| "data": { | |
| "text/html": [ | |
| "Syncing run <strong><a href='https://wandb.ai/' target=\"_blank\">different-firefly-4</a></strong> to <a href='https://wandb.ai/birs/trl' target=\"_blank\">Weights & Biases</a> (<a href='https://wandb.me/run' target=\"_blank\">docs</a>)<br/>" | |
| ], | |
| "text/plain": [ | |
| "<IPython.core.display.HTML object>" | |
| ] | |
| }, | |
| "metadata": {}, | |
| "output_type": "display_data" | |
| }, | |
| { | |
| "data": { | |
| "text/html": [ | |
| " View project at <a href='https://wandb.ail' target=\"_blank\">https://wandb.ai/</a>" | |
| ], | |
| "text/plain": [ | |
| "<IPython.core.display.HTML object>" | |
| ] | |
| }, | |
| "metadata": {}, | |
| "output_type": "display_data" | |
| }, | |
| { | |
| "data": { | |
| "text/html": [ | |
| " View run at <a href='https://wandb.ai/' target=\"_blank\">https://wandb.ai/</a>" | |
| ], | |
| "text/plain": [ | |
| "<IPython.core.display.HTML object>" | |
| ] | |
| }, | |
| "metadata": {}, | |
| "output_type": "display_data" | |
| } | |
| ], | |
| "source": [ | |
| "ppo_trainer = PPOTrainer(config, gpt2_model, gpt2_model_ref, gpt2_tokenizer, dataset, data_collator=collator)" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 11, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "if ppo_trainer.accelerator.num_processes == 1:\n", | |
| " device = 0 if torch.cuda.is_available() else \"cpu\" # to avoid a `pipeline` bug\n", | |
| "else:\n", | |
| " device = ppo_trainer.accelerator.device\n", | |
| "sentiment_pipe = pipeline(\"sentiment-analysis\", \"lvwerra/distilbert-imdb\", device=device)" | |
| ] | |
| }, | |
| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "The model outputs are the logits for the negative and positive class. We will use the logits for positive class as a reward signal for the language model." | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 12, | |
| "metadata": {}, | |
| "outputs": [ | |
| { | |
| "data": { | |
| "text/plain": [ | |
| "[{'label': 'NEGATIVE', 'score': 2.3350484371185303},\n", | |
| " {'label': 'POSITIVE', 'score': -2.726576566696167}]" | |
| ] | |
| }, | |
| "execution_count": 12, | |
| "metadata": {}, | |
| "output_type": "execute_result" | |
| } | |
| ], | |
| "source": [ | |
| "text = \"this movie was really bad!!\"\n", | |
| "output = sentiment_pipe(text, **sentiment_pipe_kwargs)\n", | |
| "output" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 13, | |
| "metadata": {}, | |
| "outputs": [ | |
| { | |
| "data": { | |
| "text/plain": [ | |
| "[{'label': 'POSITIVE', 'score': 2.557040214538574},\n", | |
| " {'label': 'NEGATIVE', 'score': -2.294790029525757}]" | |
| ] | |
| }, | |
| "execution_count": 13, | |
| "metadata": {}, | |
| "output_type": "execute_result" | |
| } | |
| ], | |
| "source": [ | |
| "text = \"this movie was really good!!\"\n", | |
| "output = sentiment_pipe(text, **sentiment_pipe_kwargs)\n", | |
| "output" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 14, | |
| "metadata": {}, | |
| "outputs": [ | |
| { | |
| "data": { | |
| "text/plain": [ | |
| "[{'label': 'POSITIVE', 'score': 0.8562760949134827},\n", | |
| " {'label': 'NEGATIVE', 'score': -0.7086048722267151}]" | |
| ] | |
| }, | |
| "execution_count": 14, | |
| "metadata": {}, | |
| "output_type": "execute_result" | |
| } | |
| ], | |
| "source": [ | |
| "text = \"this movie was a documentary\"\n", | |
| "output = sentiment_pipe(text, **sentiment_pipe_kwargs)\n", | |
| "output" | |
| ] | |
| }, | |
| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "The resulting reward signal:" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 15, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "def extract_pipe_output(outputs):\n", | |
| " positive_logits = []\n", | |
| " for out in outputs:\n", | |
| " for element in out:\n", | |
| " if element[\"label\"] == \"POSITIVE\":\n", | |
| " positive_logits.append(torch.tensor(element[\"score\"]))\n", | |
| " return positive_logits" | |
| ] | |
| }, | |
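| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "Applying the helper to the pipeline output from the \"documentary\" example above (this assumes the `output` variable from that cell is still in scope):" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "# returns a list with one tensor per input text, holding the POSITIVE-class logit\n", | |
| "extract_pipe_output([output])" | |
| ] | |
| }, | |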
| { | |
| "cell_type": "code", | |
| "execution_count": 16, | |
| "metadata": {}, | |
| "outputs": [ | |
| { | |
| "data": { | |
| "text/plain": [ | |
| "-0.7086048722267151" | |
| ] | |
| }, | |
| "execution_count": 16, | |
| "metadata": {}, | |
| "output_type": "execute_result" | |
| } | |
| ], | |
| "source": [ | |
| "output[1][\"score\"]" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 17, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "ctrl_str = [\"[negative]\", \"[neutral]\", \"[positive]\"]\n", | |
| "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # this should be handled by accelerate\n", | |
| "ctrl_tokens = dict((s, gpt2_tokenizer.encode(s, return_tensors=\"pt\").squeeze().to(device)) for s in ctrl_str)" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 18, | |
| "metadata": {}, | |
| "outputs": [ | |
| { | |
| "data": { | |
| "text/plain": [ | |
| "{'[negative]': tensor([ 58, 31591, 60], device='cuda:0'),\n", | |
| " '[neutral]': tensor([ 58, 29797, 60], device='cuda:0'),\n", | |
| " '[positive]': tensor([ 58, 24561, 60], device='cuda:0')}" | |
| ] | |
| }, | |
| "execution_count": 18, | |
| "metadata": {}, | |
| "output_type": "execute_result" | |
| } | |
| ], | |
| "source": [ | |
| "ctrl_tokens" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 19, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "def pos_logit_to_reward(logit, task):\n", | |
| " \"\"\"\n", | |
| " Take the positive sentiment logit and scale it for the task.\n", | |
| " task [negative]: reward = -logit\n", | |
| " task [neutral]: reward = -2*abs(logit)+4\n", | |
| " task [positive]: reward = logit\n", | |
| " \"\"\"\n", | |
| " for i in range(len(logit)):\n", | |
| " if task[i] == \"[negative]\":\n", | |
| " logit[i] = -logit[i]\n", | |
| " elif task[i] == \"[neutral]\":\n", | |
| " logit[i] = -2 * torch.abs(logit[i]) + 4\n", | |
| " elif task[i] == \"[positive]\":\n", | |
| " pass\n", | |
| " else:\n", | |
| " raise ValueError(\"task has to be in [0, 1, 2]!\")\n", | |
| " return logit" | |
| ] | |
| }, | |
| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "The following examples show the rewards for the cases where the classifier logit is 4, -4 and 0 for the three targets `['negative]`, `['neutral]` and `['positive']`. The scaling is not perfect as it differs between neutral and the other two classes. This is something to further investigate in the future. Ideally, one would use the logit output for each class individually, but since there is no dedicated class for neutral this is a workaround." | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 20, | |
| "metadata": {}, | |
| "outputs": [ | |
| { | |
| "name": "stdout", | |
| "output_type": "stream", | |
| "text": [ | |
| "['[negative]', '[neutral]', '[positive]']\n" | |
| ] | |
| } | |
| ], | |
| "source": [ | |
| "print(ctrl_str)" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 21, | |
| "metadata": {}, | |
| "outputs": [ | |
| { | |
| "data": { | |
| "text/plain": [ | |
| "tensor([-4., -4., 4.])" | |
| ] | |
| }, | |
| "execution_count": 21, | |
| "metadata": {}, | |
| "output_type": "execute_result" | |
| } | |
| ], | |
| "source": [ | |
| "pos_logit_to_reward(torch.Tensor([4, 4, 4]), ctrl_str)" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 22, | |
| "metadata": {}, | |
| "outputs": [ | |
| { | |
| "data": { | |
| "text/plain": [ | |
| "tensor([ 4., -4., -4.])" | |
| ] | |
| }, | |
| "execution_count": 22, | |
| "metadata": {}, | |
| "output_type": "execute_result" | |
| } | |
| ], | |
| "source": [ | |
| "pos_logit_to_reward(torch.Tensor([-4, -4, -4]), ctrl_str)" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 23, | |
| "metadata": {}, | |
| "outputs": [ | |
| { | |
| "data": { | |
| "text/plain": [ | |
| "tensor([-0., 4., 0.])" | |
| ] | |
| }, | |
| "execution_count": 23, | |
| "metadata": {}, | |
| "output_type": "execute_result" | |
| } | |
| ], | |
| "source": [ | |
| "pos_logit_to_reward(torch.Tensor([0, 0, 0]), ctrl_str)" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 24, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "generation_kwargs = {\n", | |
| " \"min_length\": -1,\n", | |
| " \"top_k\": 0.0,\n", | |
| " \"top_p\": 1.0,\n", | |
| " \"do_sample\": True,\n", | |
| " \"pad_token_id\": gpt2_tokenizer.eos_token_id,\n", | |
| " \"max_new_tokens\": txt_out_len,\n", | |
| " \"eos_token_id\": -1,\n", | |
| "}" | |
| ] | |
| }, | |
| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "**Steps**\n", | |
| "\n", | |
| "The training loop consists of the following steps:\n", | |
| "1. Get a batch of queries and create random controls\n", | |
| "2. Get the query responses from the policy\n", | |
| "3. Join query and responses and tokenize for BERT analysis\n", | |
| "4. Get sentiments for query/responses from BERT\n", | |
| "5. Optimize policy with PPO using the (query, response, reward) triplet\n", | |
| "6. Log all the training statistics\n", | |
| "\n", | |
| "**Training time**\n", | |
| "\n", | |
| "This step takes **~1h** on a 4070 Ti Super GPU (10.5 VRAM) with the above specified settings" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [ | |
| { | |
| "name": "stderr", | |
| "output_type": "stream", | |
| "text": [ | |
| " 4%|███ | 7/160 [01:00<22:15, 8.73s/it]You seem to be using the pipelines sequentially on GPU. In order to maximize efficiency please use a dataset\n", | |
| "100%|███████████████████████████████████████████████████████████████████| 160/160 [23:26<00:00, 8.79s/it]\n", | |
| " 19%|████████████▊ | 30/160 [04:23<19:02, 8.79s/it]" | |
| ] | |
| } | |
| ], | |
| "source": [ | |
| "for epoch in range(2):\n", | |
| " for batch in tqdm(ppo_trainer.dataloader):\n", | |
| " (logs, game_data,) = (\n", | |
| " dict(),\n", | |
| " dict(),\n", | |
| " )\n", | |
| "\n", | |
| " #### prepend a random control token\n", | |
| " task_list = choices(ctrl_str, k=config.batch_size)\n", | |
| " game_data[\"query\"] = [t + q for t, q in zip(task_list, batch[\"query\"])]\n", | |
| " query_tensors = [torch.cat((ctrl_tokens[t], input_ids)) for t, input_ids in zip(task_list, batch[\"input_ids\"])]\n", | |
| "\n", | |
| " #### get response from gpt2\n", | |
| " response_tensors = []\n", | |
| " for query in query_tensors:\n", | |
| " response = ppo_trainer.generate(query, **generation_kwargs)\n", | |
| " response_tensors.append(response.squeeze()[-txt_out_len:])\n", | |
| " game_data[\"response\"] = [gpt2_tokenizer.decode(r.squeeze()) for r in response_tensors]\n", | |
| "\n", | |
| " #### sentiment analysis\n", | |
| " texts = [q + r for q, r in zip(batch[\"query\"], game_data[\"response\"])]\n", | |
| " logits = extract_pipe_output(sentiment_pipe(texts, **sentiment_pipe_kwargs))\n", | |
| " rewards = pos_logit_to_reward(logits, task_list)\n", | |
| "\n", | |
| " #### Run PPO training\n", | |
| " t = time.time()\n", | |
| " stats = ppo_trainer.step(query_tensors, response_tensors, rewards)\n", | |
| "\n", | |
| " for cs in ctrl_str:\n", | |
| " key = \"env/reward_\" + cs.strip(\"[]\")\n", | |
| " stats[key] = np.mean([r.cpu().numpy() for r, t in zip(rewards, task_list) if t == cs])\n", | |
| " ppo_trainer.log_stats(stats, game_data, rewards)" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "for ctrl_s in ctrl_str:\n", | |
| " plt.hist(\n", | |
| " [r for r, t in zip(logs[\"env/reward_dist\"], task_list) if t == ctrl_s], density=True, alpha=0.5, label=ctrl_s\n", | |
| " )\n", | |
| "plt.legend(loc=\"best\")\n", | |
| "plt.title(\"reward distribution\")\n", | |
| "plt.grid(True)\n", | |
| "plt.show()" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "gpt2_model.save_pretrained(\"gpt2-imdb-ctrl\")\n", | |
| "gpt2_tokenizer.save_pretrained(\"gpt2-imdb-ctrl\")" | |
| ] | |
| }, | |
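| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "A minimal inference sketch with the checkpoint saved above (the prompt is illustrative, and this assumes the `device` and `generation_kwargs` defined earlier are still in scope):" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "from transformers import AutoTokenizer\n", | |
| "from trl import AutoModelForCausalLMWithValueHead\n", | |
| "\n", | |
| "trained_model = AutoModelForCausalLMWithValueHead.from_pretrained(\"gpt2-imdb-ctrl\").to(device)\n", | |
| "trained_tokenizer = AutoTokenizer.from_pretrained(\"gpt2-imdb-ctrl\")\n", | |
| "\n", | |
| "prompt = \"[positive] This movie was\"\n", | |
| "input_ids = trained_tokenizer(prompt, return_tensors=\"pt\").input_ids.to(device)\n", | |
| "output_ids = trained_model.generate(input_ids, **generation_kwargs)\n", | |
| "print(trained_tokenizer.decode(output_ids[0]))" | |
| ] | |
| }, | |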
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [] | |
| } | |
| ], | |
| "metadata": { | |
| "kernelspec": { | |
| "display_name": "Python 3 (ipykernel)", | |
| "language": "python", | |
| "name": "python3" | |
| }, | |
| "language_info": { | |
| "codemirror_mode": { | |
| "name": "ipython", | |
| "version": 3 | |
| }, | |
| "file_extension": ".py", | |
| "mimetype": "text/x-python", | |
| "name": "python", | |
| "nbconvert_exporter": "python", | |
| "pygments_lexer": "ipython3", | |
| "version": "3.10.12" | |
| }, | |
| "vscode": { | |
| "interpreter": { | |
| "hash": "d2cfb53525227c89f8d14fa784301fa46c451cc9223d94ccce9e17956835eea2" | |
| } | |
| } | |
| }, | |
| "nbformat": 4, | |
| "nbformat_minor": 4 | |
| } |