import logging
from email.utils import parsedate_to_datetime
from pathlib import Path

import requests
from bs4 import BeautifulSoup
from feedgen.feed import FeedGenerator

# Official OpenAI news RSS feed and the research landing page it links to.
RSS_URL = "https://openai.com/news/rss.xml"
BLOG_URL = "https://openai.com/news/research/"
CATEGORY = "Research"
FEED_NAME = "openai_research"

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


def get_project_root():
    """Return the project root directory (parent of this module's package)."""
    return Path(__file__).parent.parent


def fetch_rss_content(url: str = RSS_URL) -> str:
    """Fetch the official OpenAI news RSS feed.

    Args:
        url: Feed URL; defaults to the official OpenAI news RSS endpoint.

    Returns:
        The raw RSS XML as text.

    Raises:
        requests.HTTPError: if the server responds with an error status.
    """
    logger.info("Fetching RSS content from %s", url)
    # A browser-like User-Agent avoids bot-filtering on the CDN.
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/122.0.0.0 Safari/537.36"
        )
    }
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    return response.text


def parse_research_posts(rss_content: str) -> list[dict]:
    """Parse Research-tagged items from the official OpenAI RSS feed.

    Args:
        rss_content: Raw RSS XML text.

    Returns:
        A list of dicts with keys ``title``, ``link``, ``description``,
        ``date`` (timezone-aware ``datetime`` or ``None`` when the item has
        no ``pubDate``), and ``category``.
    """
    soup = BeautifulSoup(rss_content, "xml")
    posts = []

    for item in soup.find_all("item"):
        # Only keep items explicitly categorized as Research.
        category = item.find("category")
        if not category or category.get_text(strip=True) != CATEGORY:
            continue

        title = item.find("title")
        link = item.find("link")
        description = item.find("description")
        pub_date = item.find("pubDate")

        if not title or not link:
            logger.warning("Skipping item missing title or link")
            continue

        # pubDate is optional in RSS; keep None so callers can decide how
        # undated posts should sort / render.
        parsed_date = None
        if pub_date and pub_date.get_text(strip=True):
            parsed_date = parsedate_to_datetime(pub_date.get_text(strip=True))

        posts.append(
            {
                "title": title.get_text(strip=True),
                "link": link.get_text(strip=True),
                "description": description.get_text(strip=True) if description else "",
                "date": parsed_date,
                "category": CATEGORY,
            }
        )

    logger.info("Parsed %s research posts", len(posts))
    return posts


def generate_rss_feed(posts: list[dict]) -> FeedGenerator:
    """Build the RSS feed for OpenAI Research posts.

    Posts are sorted newest-first; posts without a parsed date sort last.

    Args:
        posts: Post dicts as produced by :func:`parse_research_posts`.

    Returns:
        A populated :class:`FeedGenerator` ready to be serialized.
    """
    fg = FeedGenerator()
    fg.title("OpenAI Research News")
    fg.description("Latest research news and updates from OpenAI")
    fg.link(href=BLOG_URL)
    fg.language("en")

    # BUG FIX: the previous key returned a datetime for dated posts and the
    # *string* "1970-01-01" for undated ones; comparing datetime to str makes
    # sorted() raise TypeError whenever the feed mixes both. Sort on a float
    # timestamp instead, with 0.0 so undated posts land last (reverse=True).
    sorted_posts = sorted(
        posts,
        key=lambda p: p["date"].timestamp() if p["date"] else 0.0,
        reverse=True,
    )

    for post in sorted_posts:
        fe = fg.add_entry()
        fe.title(post["title"])
        fe.link(href=post["link"])
        fe.description(post["description"])

        if post["date"] is not None:
            fe.published(post["date"])

        fe.category(term=post["category"])

    return fg


def main() -> None:
    """Generate and save the OpenAI Research RSS feed to feeds/."""
    rss_content = fetch_rss_content()
    posts = parse_research_posts(rss_content)

    if not posts:
        # Still generate an (empty) feed so the output file keeps a valid
        # RSS structure for downstream readers.
        logger.warning("No research posts found in the OpenAI RSS feed")

    feeds_dir = get_project_root() / "feeds"
    feeds_dir.mkdir(exist_ok=True)
    output_file = feeds_dir / f"feed_{FEED_NAME}.xml"

    generate_rss_feed(posts).rss_file(str(output_file), pretty=True)
    logger.info("Saved RSS feed to %s", output_file)


if __name__ == "__main__":
    main()
+ Research + Thu, 16 Jun 2016 07:00:00 +0000 + + + Infrastructure for deep learning + https://openai.com/index/infrastructure-for-deep-learning + Deep learning is an empirical science, and the quality of a group’s infrastructure is a multiplier on progress. Fortunately, today’s open-source ecosystem makes it possible for anyone to build great deep learning infrastructure. + Research + Mon, 29 Aug 2016 07:00:00 +0000 + + + Transfer from simulation to real world through learning deep inverse dynamics model + https://openai.com/index/transfer-from-simulation-to-real-world-through-learning-deep-inverse-dynamics-model + Research + Tue, 11 Oct 2016 07:00:00 +0000 + + + Extensions and limitations of the neural GPU + https://openai.com/index/extensions-and-limitations-of-the-neural-gpu + Research + Wed, 02 Nov 2016 07:00:00 +0000 + + + Variational lossy autoencoder + https://openai.com/index/variational-lossy-autoencoder + Research + Tue, 08 Nov 2016 08:00:00 +0000 + + + RL²: Fast reinforcement learning via slow reinforcement learning + https://openai.com/index/rl2 + Research + Wed, 09 Nov 2016 08:00:00 +0000 + + + A connection between generative adversarial networks, inverse reinforcement learning, and energy-based models + https://openai.com/index/a-connection-between-generative-adversarial-networks-inverse-reinforcement-learning-and-energy-based-models + Research + Fri, 11 Nov 2016 08:00:00 +0000 + + + On the quantitative analysis of decoder-based generative models + https://openai.com/index/on-the-quantitative-analysis-of-decoder-based-generative-models + Research + Mon, 14 Nov 2016 08:00:00 +0000 + + + #Exploration: A study of count-based exploration for deep reinforcement learning + https://openai.com/index/exploration + Research + Tue, 15 Nov 2016 08:00:00 +0000 + + + Universe + https://openai.com/index/universe + We’re releasing Universe, a software platform for measuring and training an AI’s general intelligence across the world’s supply of games, websites and 
other applications. + Research + Mon, 05 Dec 2016 08:00:00 +0000 + + + PixelCNN++: Improving the PixelCNN with discretized logistic mixture likelihood and other modifications + https://openai.com/index/pixelcnn-plus-plus + Research + Thu, 19 Jan 2017 08:00:00 +0000 + + + Third-person imitation learning + https://openai.com/index/third-person-imitation-learning + Research + Mon, 06 Mar 2017 08:00:00 +0000 + + + Prediction and control with temporal segment models + https://openai.com/index/prediction-and-control-with-temporal-segment-models + Research + Sun, 12 Mar 2017 08:00:00 +0000 + + + Emergence of grounded compositional language in multi-agent populations + https://openai.com/index/emergence-of-grounded-compositional-language-in-multi-agent-populations + Research + Wed, 15 Mar 2017 07:00:00 +0000 + + + Learning to communicate + https://openai.com/index/learning-to-communicate + In this post we’ll outline new OpenAI research in which agents develop their own language. + Research + Thu, 16 Mar 2017 07:00:00 +0000 + + + One-shot imitation learning + https://openai.com/index/one-shot-imitation-learning + Research + Tue, 21 Mar 2017 07:00:00 +0000 + + + Evolution strategies as a scalable alternative to reinforcement learning + https://openai.com/index/evolution-strategies + We’ve discovered that evolution strategies (ES), an optimization technique that’s been known for decades, rivals the performance of standard reinforcement learning (RL) techniques on modern RL benchmarks (e.g. Atari/MuJoCo), while overcoming many of RL’s inconveniences. + Research + Fri, 24 Mar 2017 07:00:00 +0000 + + + Spam detection in the physical world + https://openai.com/index/spam-detection-in-the-physical-world + We’ve created the world’s first Spam-detecting AI trained entirely in simulation and deployed on a physical robot. 
+ Research + Sat, 01 Apr 2017 07:00:00 +0000 + + + Unsupervised sentiment neuron + https://openai.com/index/unsupervised-sentiment-neuron + We’ve developed an unsupervised system which learns an excellent representation of sentiment, despite being trained only to predict the next character in the text of Amazon reviews. + Research + Thu, 06 Apr 2017 07:00:00 +0000 + + + Stochastic Neural Networks for hierarchical reinforcement learning + https://openai.com/index/stochastic-neural-networks-for-hierarchical-reinforcement-learning + Research + Mon, 10 Apr 2017 07:00:00 +0000 + + + Equivalence between policy gradients and soft Q-learning + https://openai.com/index/equivalence-between-policy-gradients-and-soft-q-learning + Research + Fri, 21 Apr 2017 07:00:00 +0000 + + + Roboschool + https://openai.com/index/roboschool + We are releasing Roboschool: open-source software for robot simulation, integrated with OpenAI Gym. + Research + Mon, 15 May 2017 07:00:00 +0000 + + + Robots that learn + https://openai.com/index/robots-that-learn + We’ve created a robotics system, trained entirely in simulation and deployed on a physical robot, which can learn a new task after seeing it done once. + Research + Tue, 16 May 2017 07:00:00 +0000 + + + OpenAI Baselines: DQN + https://openai.com/index/openai-baselines-dqn + We’re open-sourcing OpenAI Baselines, our internal effort to reproduce reinforcement learning algorithms with performance on par with published results. We’ll release the algorithms over upcoming months; today’s release includes DQN and three of its variants. + Research + Wed, 24 May 2017 07:00:00 +0000 + + + UCB exploration via Q-ensembles + https://openai.com/index/ucb-exploration-via-q-ensembles + Research + Mon, 05 Jun 2017 07:00:00 +0000 + + + Learning to cooperate, compete, and communicate + https://openai.com/index/learning-to-cooperate-compete-and-communicate + Multiagent environments where agents compete for resources are stepping stones on the path to AGI. 
Multiagent environments have two useful properties: first, there is a natural curriculum—the difficulty of the environment is determined by the skill of your competitors (and if you’re competing against clones of yourself, the environment exactly matches your skill level). Second, a multiagent environment has no stable equilibrium: no matter how smart an agent is, there’s always pressure to get smarter. These environments have a very different feel from traditional environments, and it’ll take a lot more research before we become good at them. + Research + Thu, 08 Jun 2017 07:00:00 +0000 + + + Faster physics in Python + https://openai.com/index/faster-physics-in-python + We’re open-sourcing a high-performance Python library for robotic simulation using the MuJoCo engine, developed over our past year of robotics research. + Research + Wed, 28 Jun 2017 07:00:00 +0000 + + + Teacher–student curriculum learning + https://openai.com/index/teacher-student-curriculum-learning + Research + Sat, 01 Jul 2017 07:00:00 +0000 + + + Hindsight Experience Replay + https://openai.com/index/hindsight-experience-replay + Research + Wed, 05 Jul 2017 07:00:00 +0000 + + + Robust adversarial inputs + https://openai.com/index/robust-adversarial-inputs + We’ve created images that reliably fool neural network classifiers when viewed from varied scales and perspectives. This challenges a claim from last week that self-driving cars would be hard to trick maliciously since they capture images from multiple scales, angles, perspectives, and the like. + Research + Mon, 17 Jul 2017 07:00:00 +0000 + + + Proximal Policy Optimization + https://openai.com/index/openai-baselines-ppo + We’re releasing a new class of reinforcement learning algorithms, Proximal Policy Optimization (PPO), which perform comparably or better than state-of-the-art approaches while being much simpler to implement and tune. 
PPO has become the default reinforcement learning algorithm at OpenAI because of its ease of use and good performance. + Research + Thu, 20 Jul 2017 07:00:00 +0000 + + + Better exploration with parameter noise + https://openai.com/index/better-exploration-with-parameter-noise + We’ve found that adding adaptive noise to the parameters of reinforcement learning algorithms frequently boosts performance. This exploration method is simple to implement and very rarely decreases performance, so it’s worth trying on any problem. + Research + Thu, 27 Jul 2017 07:00:00 +0000 + + + Gathering human feedback + https://openai.com/index/gathering-human-feedback + RL-Teacher is an open-source implementation of our interface to train AIs via occasional human feedback rather than hand-crafted reward functions. The underlying technique was developed as a step towards safe AI systems, but also applies to reinforcement learning problems with rewards that are hard to specify. + Research + Thu, 03 Aug 2017 07:00:00 +0000 + + + Dota 2 + https://openai.com/index/dota-2 + We’ve created a bot which beats the world’s top professionals at 1v1 matches of Dota 2 under standard tournament rules. The bot learned the game from scratch by self-play, and does not use imitation learning or tree search. This is a step towards building AI systems which accomplish well-defined goals in messy, complicated situations involving real humans. + Research + Fri, 11 Aug 2017 07:00:00 +0000 + + + More on Dota 2 + https://openai.com/index/more-on-dota-2 + Our Dota 2 result shows that self-play can catapult the performance of machine learning systems from far below human level to superhuman, given sufficient compute. In the span of a month, our system went from barely matching a high-ranked player to beating the top pros and has continued to improve since then. 
Supervised deep learning systems can only be as good as their training datasets, but in self-play systems, the available data improves automatically as the agent gets better. + Research + Wed, 16 Aug 2017 07:00:00 +0000 + + + OpenAI Baselines: ACKTR & A2C + https://openai.com/index/openai-baselines-acktr-a2c + We’re releasing two new OpenAI Baselines implementations: ACKTR and A2C. A2C is a synchronous, deterministic variant of Asynchronous Advantage Actor Critic (A3C) which we’ve found gives equal performance. ACKTR is a more sample-efficient reinforcement learning algorithm than TRPO and A2C, and requires only slightly more computation than A2C per update. + Research + Fri, 18 Aug 2017 07:00:00 +0000 + + + Learning with opponent-learning awareness + https://openai.com/index/learning-with-opponent-learning-awareness + Research + Wed, 13 Sep 2017 07:00:00 +0000 + + + Learning to model other minds + https://openai.com/index/learning-to-model-other-minds + We’re releasing an algorithm which accounts for the fact that other agents are learning too, and discovers self-interested yet collaborative strategies like tit-for-tat in the iterated prisoner’s dilemma. This algorithm, Learning with Opponent-Learning Awareness (LOLA), is a small step towards agents that model other minds. + Research + Thu, 14 Sep 2017 07:00:00 +0000 + + + Nonlinear computation in deep linear networks + https://openai.com/index/nonlinear-computation-in-deep-linear-networks + Research + Fri, 29 Sep 2017 07:00:00 +0000 + + + Competitive self-play + https://openai.com/index/competitive-self-play + We’ve found that self-play allows simulated AIs to discover physical skills like tackling, ducking, faking, kicking, catching, and diving for the ball, without explicitly designing an environment with these skills in mind. Self-play ensures that the environment is always the right difficulty for an AI to improve. 
Taken alongside our Dota 2 self-play results, we have increasing confidence that self-play will be a core part of powerful AI systems in the future. + Research + Wed, 11 Oct 2017 07:00:00 +0000 + + + Meta-learning for wrestling + https://openai.com/index/meta-learning-for-wrestling + We show that for the task of simulated robot wrestling, a meta-learning agent can learn to quickly defeat a stronger non-meta-learning agent, and also show that the meta-learning agent can adapt to physical malfunction. + Research + Wed, 11 Oct 2017 07:00:00 +0000 + + + Domain randomization and generative models for robotic grasping + https://openai.com/index/domain-randomization-and-generative-models-for-robotic-grasping + Research + Tue, 17 Oct 2017 07:00:00 +0000 + + + Asymmetric actor critic for image-based robot learning + https://openai.com/index/asymmetric-actor-critic-for-image-based-robot-learning + Research + Wed, 18 Oct 2017 07:00:00 +0000 + + + Sim-to-real transfer of robotic control with dynamics randomization + https://openai.com/index/sim-to-real-transfer-of-robotic-control-with-dynamics-randomization + Research + Wed, 18 Oct 2017 07:00:00 +0000 + + + Generalizing from simulation + https://openai.com/index/generalizing-from-simulation + Our latest robotics techniques allow robot controllers, trained entirely in simulation and deployed on physical robots, to react to unplanned changes in the environment as they solve simple tasks. That is, we’ve used these techniques to build closed-loop systems rather than open-loop ones as before. + Research + Thu, 19 Oct 2017 07:00:00 +0000 + + + Learning a hierarchy + https://openai.com/index/learning-a-hierarchy + We’ve developed a hierarchical reinforcement learning algorithm that learns high-level actions useful for solving a range of tasks, allowing fast solving of tasks requiring thousands of timesteps. 
Our algorithm, when applied to a set of navigation problems, discovers a set of high-level actions for walking and crawling in different directions, which enables the agent to master new navigation tasks quickly. + Research + Thu, 26 Oct 2017 07:00:00 +0000 + + + Interpretable and pedagogical examples + https://openai.com/index/interpretable-and-pedagogical-examples + Research + Thu, 02 Nov 2017 07:00:00 +0000 + + + Learning sparse neural networks through L₀ regularization + https://openai.com/index/learning-sparse-neural-networks-through-l0-regularization + Research + Mon, 04 Dec 2017 08:00:00 +0000 + + + Block-sparse GPU kernels + https://openai.com/index/block-sparse-gpu-kernels + We’re releasing highly-optimized GPU kernels for an underexplored class of neural network architectures: networks with block-sparse weights. Depending on the chosen sparsity, these kernels can run orders of magnitude faster than cuBLAS or cuSPARSE. We’ve used them to attain state-of-the-art results in text sentiment analysis and generative modeling of text and images. + Research + Wed, 06 Dec 2017 08:00:00 +0000 + + + Scaling Kubernetes to 2,500 nodes + https://openai.com/index/scaling-kubernetes-to-2500-nodes + Research + Thu, 18 Jan 2018 08:00:00 +0000 + + + Requests for Research 2.0 + https://openai.com/index/requests-for-research-2 + We’re releasing a new batch of seven unsolved problems which have come up in the course of our research at OpenAI. + Research + Wed, 31 Jan 2018 08:00:00 +0000 + + + Discovering types for entity disambiguation + https://openai.com/index/discovering-types-for-entity-disambiguation + We’ve built a system for automatically figuring out which object is meant by a word by having a neural network decide if the word belongs to each of about 100 automatically-discovered “types” (non-exclusive categories). 
+ Research + Wed, 07 Feb 2018 08:00:00 +0000 + + + Interpretable machine learning through teaching + https://openai.com/index/interpretable-machine-learning-through-teaching + We’ve designed a method that encourages AIs to teach each other with examples that also make sense to humans. Our approach automatically selects the most informative examples to teach a concept—for instance, the best images to describe the concept of dogs—and experimentally we found our approach to be effective at teaching both AIs + Research + Thu, 15 Feb 2018 08:00:00 +0000 + + + Multi-Goal Reinforcement Learning: Challenging robotics environments and request for research + https://openai.com/index/multi-goal-reinforcement-learning + Research + Mon, 26 Feb 2018 08:00:00 +0000 + + + Ingredients for robotics research + https://openai.com/index/ingredients-for-robotics-research + We’re releasing eight simulated robotics environments and a Baselines implementation of Hindsight Experience Replay, all developed for our research over the past year. We’ve used these environments to train models which work on physical robots. We’re also releasing a set of requests for robotics research. + Research + Mon, 26 Feb 2018 08:00:00 +0000 + + + Some considerations on learning to explore via meta-reinforcement learning + https://openai.com/index/some-considerations-on-learning-to-explore-via-meta-reinforcement-learning + Research + Sat, 03 Mar 2018 08:00:00 +0000 + + + Reptile: A scalable meta-learning algorithm + https://openai.com/index/reptile + We’ve developed a simple meta-learning algorithm called Reptile which works by repeatedly sampling a task, performing stochastic gradient descent on it, and updating the initial parameters towards the final parameters learned on that task. 
Reptile is the application of the Shortest Descent algorithm to the meta-learning setting, and is mathematically similar to first-order MAML (which is a version of the well-known MAML algorithm) that only needs black-box access to an optimizer such as SGD or Adam, with similar computational efficiency and performance. + Research + Wed, 07 Mar 2018 08:00:00 +0000 + + + On first-order meta-learning algorithms + https://openai.com/index/on-first-order-meta-learning-algorithms + Research + Thu, 08 Mar 2018 08:00:00 +0000 + + + Improving GANs using optimal transport + https://openai.com/index/improving-gans-using-optimal-transport + Research + Thu, 15 Mar 2018 07:00:00 +0000 + + + Variance reduction for policy gradient with action-dependent factorized baselines + https://openai.com/index/variance-reduction-for-policy-gradient-with-action-dependent-factorized-baselines + Research + Tue, 20 Mar 2018 07:00:00 +0000 + + + Retro Contest + https://openai.com/index/retro-contest + We’re launching a transfer learning contest that measures a reinforcement learning algorithm’s ability to generalize from previous experience. + Research + Thu, 05 Apr 2018 07:00:00 +0000 + + + Gotta Learn Fast: A new benchmark for generalization in RL + https://openai.com/index/gotta-learn-fast + Research + Tue, 10 Apr 2018 07:00:00 +0000 + + + Evolved Policy Gradients + https://openai.com/index/evolved-policy-gradients + We’re releasing an experimental metalearning approach called Evolved Policy Gradients, a method that evolves the loss function of learning agents, which can enable fast training on novel tasks. Agents trained with EPG can succeed at basic tasks at test time that were outside their training regime, like learning to navigate to an object on a different side of the room from where it was placed during training. 
+ Research + Wed, 18 Apr 2018 07:00:00 +0000 + + + AI and compute + https://openai.com/index/ai-and-compute + We’re releasing an analysis showing that since 2012, the amount of compute used in the largest AI training runs has been increasing exponentially with a 3.4-month doubling time (by comparison, Moore’s Law had a 2-year doubling period)[^footnote-correction]. Since 2012, this metric has grown by more than 300,000x (a 2-year doubling period would yield only a 7x increase). Improvements in compute have been a key component of AI progress, so as long as this trend continues, it’s worth preparing for the implications of systems far outside today’s capabilities. + Research + Wed, 16 May 2018 07:00:00 +0000 + + + Gym Retro + https://openai.com/index/gym-retro + We’re releasing the full version of Gym Retro, a platform for reinforcement learning research on games. This brings our publicly-released game count from around 70 Atari games and 30 Sega games to over 1,000 games across a variety of backing emulators. We’re also releasing the tool we use to add new games to the platform. + Research + Fri, 25 May 2018 07:00:00 +0000 + + + GamePad: A learning environment for theorem proving + https://openai.com/index/gamepad + Research + Sat, 02 Jun 2018 07:00:00 +0000 + + + Learning policy representations in multiagent systems + https://openai.com/index/learning-policy-representations-in-multiagent-systems + Research + Sun, 17 Jun 2018 07:00:00 +0000 + + + Retro Contest: Results + https://openai.com/index/retro-contest-results + The first run of our Retro Contest—exploring the development of algorithms that can generalize from previous experience—is now complete. + Research + Fri, 22 Jun 2018 07:00:00 +0000 + + + OpenAI Five + https://openai.com/index/openai-five + Our team of five neural networks, OpenAI Five, has started to defeat amateur human teams at Dota 2. 
+ Research + Mon, 25 Jun 2018 07:00:00 +0000 + + + Learning Montezuma’s Revenge from a single demonstration + https://openai.com/index/learning-montezumas-revenge-from-a-single-demonstration + We’ve trained an agent to achieve a high score of 74,500 on Montezuma’s Revenge from a single human demonstration, better than any previously published result. Our algorithm is simple: the agent plays a sequence of games starting from carefully chosen states from the demonstration, and learns from them by optimizing the game score using PPO, the same reinforcement learning algorithm that underpins OpenAI Five. + Research + Wed, 04 Jul 2018 07:00:00 +0000 + + + Glow: Better reversible generative models + https://openai.com/index/glow + We introduce Glow, a reversible generative model which uses invertible 1x1 convolutions. It extends previous work on reversible generative models and simplifies the architecture. Our model can generate realistic high resolution images, supports efficient sampling, and discovers features that can be used to manipulate attributes of data. We’re releasing code for the model and an online visualization tool so people can explore and build on these results. + Research + Mon, 09 Jul 2018 07:00:00 +0000 + + + Variational option discovery algorithms + https://openai.com/index/variational-option-discovery-algorithms + Research + Thu, 26 Jul 2018 07:00:00 +0000 + + + Learning dexterity + https://openai.com/index/learning-dexterity + We’ve trained a human-like robot hand to manipulate physical objects with unprecedented dexterity. + Research + Mon, 30 Jul 2018 07:00:00 +0000 + + + OpenAI Five Benchmark: Results + https://openai.com/index/openai-five-benchmark-results + Yesterday, OpenAI Five won a best-of-three against a team of 99.95th percentile Dota players: Blitz, Cap, Fogged, Merlini, and MoonMeander—four of whom have played Dota professionally—in front of a live audience and 100,000 concurrent livestream viewers. 
+ Research + Mon, 06 Aug 2018 07:00:00 +0000 + + + Large-scale study of curiosity-driven learning + https://openai.com/index/large-scale-study-of-curiosity-driven-learning + Research + Mon, 13 Aug 2018 07:00:00 +0000 + + + The International 2018: Results + https://openai.com/index/the-international-2018-results + OpenAI Five lost two games against top Dota 2 players at The International in Vancouver this week, maintaining a good chance of winning for the first 20–35 minutes of both games. + Research + Thu, 23 Aug 2018 07:00:00 +0000 + + + FFJORD: Free-form continuous dynamics for scalable reversible generative models + https://openai.com/index/ffjord + Research + Tue, 02 Oct 2018 07:00:00 +0000 + + + Reinforcement learning with prediction-based rewards + https://openai.com/index/reinforcement-learning-with-prediction-based-rewards + We’ve developed Random Network Distillation (RND), a prediction-based method for encouraging reinforcement learning agents to explore their environments through curiosity, which for the first time exceeds average human performance on Montezuma’s Revenge. + Research + Wed, 31 Oct 2018 07:00:00 +0000 + + + Plan online, learn offline: Efficient learning and exploration via model-based control + https://openai.com/index/plan-online-learn-offline + Research + Mon, 05 Nov 2018 08:00:00 +0000 + + + Learning concepts with energy functions + https://openai.com/index/learning-concepts-with-energy-functions + We’ve developed an energy-based model that can quickly learn to identify and generate instances of concepts, such as near, above, between, closest, and furthest, expressed as sets of 2d points. Our model learns these concepts after only five demonstrations. We also show cross-domain transfer: we use concepts learned in a 2d particle environment to solve tasks on a 3-dimensional physics-based robot. 
+ Research + Wed, 07 Nov 2018 08:00:00 +0000 + + + Spinning Up in Deep RL + https://openai.com/index/spinning-up-in-deep-rl + We’re releasing Spinning Up in Deep RL, an educational resource designed to let anyone learn to become a skilled practitioner in deep reinforcement learning. Spinning Up consists of crystal-clear examples of RL code, educational exercises, documentation, and tutorials. + Research + Thu, 08 Nov 2018 08:00:00 +0000 + + + Quantifying generalization in reinforcement learning + https://openai.com/index/quantifying-generalization-in-reinforcement-learning + We’re releasing CoinRun, a training environment which provides a metric for an agent’s ability to transfer its experience to novel situations and has already helped clarify a longstanding puzzle in reinforcement learning. CoinRun strikes a desirable balance in complexity: the environment is simpler than traditional platformer games like Sonic the Hedgehog but still poses a worthy generalization challenge for state of the art algorithms. + Research + Thu, 06 Dec 2018 08:00:00 +0000 + + + How AI training scales + https://openai.com/index/how-ai-training-scales + We’ve discovered that the gradient noise scale, a simple statistical metric, predicts the parallelizability of neural network training on a wide range of tasks. Since complex tasks tend to have noisier gradients, increasingly large batch sizes are likely to become useful in the future, removing one potential limit to further growth of AI systems. More broadly, these results show that neural network training need not be considered a mysterious art, but can be rigorized and systematized. 
+ Research + Fri, 14 Dec 2018 08:00:00 +0000 + + + Computational limitations in robust classification and win-win results + https://openai.com/index/computational-limitations-in-robust-classification-and-win-win-results + Research + Mon, 04 Feb 2019 08:00:00 +0000 + + + Better language models and their implications + https://openai.com/index/better-language-models + We’ve trained a large-scale unsupervised language model which generates coherent paragraphs of text, achieves state-of-the-art performance on many language modeling benchmarks, and performs rudimentary reading comprehension, machine translation, question answering, and summarization—all without task-specific training. + Research + Thu, 14 Feb 2019 08:00:00 +0000 + + + Neural MMO: A massively multiagent game environment + https://openai.com/index/neural-mmo + We’re releasing a Neural MMO, a massively multiagent game environment for reinforcement learning agents. Our platform supports a large, variable number of agents within a persistent and open-ended task. The inclusion of many agents and species leads to better exploration, divergent niche formation, and greater overall competence. + Research + Mon, 04 Mar 2019 08:00:00 +0000 + + + Implicit generation and generalization methods for energy-based models + https://openai.com/index/energy-based-models + We’ve made progress towards stable and scalable training of energy-based models (EBMs) resulting in better sample quality and generalization ability than existing models. Generation in EBMs spends more compute to continually refine its answers and doing so can generate samples competitive with GANs at low temperatures, while also having mode coverage guarantees of likelihood-based models. We hope these findings stimulate further research into this promising class of models. 
+ Research + Thu, 21 Mar 2019 07:00:00 +0000 + + + OpenAI Five defeats Dota 2 world champions + https://openai.com/index/openai-five-defeats-dota-2-world-champions + OpenAI Five is the first AI to beat the world champions in an esports game, having won two back-to-back games versus the world champion Dota 2 team, OG, at Finals this weekend. Both OpenAI Five and DeepMind’s AlphaStar had previously beaten good pros privately but lost their live pro matches, making this also the first time an AI has beaten esports pros on livestream. + Research + Mon, 15 Apr 2019 07:00:00 +0000 + + + Generative modeling with sparse transformers + https://openai.com/index/sparse-transformer + We’ve developed the Sparse Transformer, a deep neural network which sets new records at predicting what comes next in a sequence—whether text, images, or sound. It uses an algorithmic improvement of the attention mechanism to extract patterns from sequences 30x longer than possible previously. + Research + Tue, 23 Apr 2019 07:00:00 +0000 + + + MuseNet + https://openai.com/index/musenet + We’ve created MuseNet, a deep neural network that can generate 4-minute musical compositions with 10 different instruments, and can combine styles from country to Mozart to the Beatles. MuseNet was not explicitly programmed with our understanding of music, but instead discovered patterns of harmony, rhythm, and style by learning to predict the next token in hundreds of thousands of MIDI files. MuseNet uses the same general-purpose unsupervised technology as GPT-2, a large-scale transformer model trained to predict the next token in a sequence, whether audio or text. 
+ Research + Thu, 25 Apr 2019 07:00:00 +0000 + + + GPT-2: 6-month follow-up + https://openai.com/index/gpt-2-6-month-follow-up + We’re releasing the 774 million parameter GPT-2 language model after the release of our small 124M model in February, staged release of our medium 355M model in May, and subsequent research with partners and the AI community into the model’s potential for misuse and societal benefit. We’re also releasing an open-source legal agreement to make it easier for organizations to initiate model-sharing partnerships with each other, and are publishing a technical report about our experience in coordinating with the wider AI research community on publication norms. + Research + Tue, 20 Aug 2019 07:00:00 +0000 + + + Emergent tool use from multi-agent interaction + https://openai.com/index/emergent-tool-use + We’ve observed agents discovering progressively more complex tool use while playing a simple game of hide-and-seek. Through training in our new simulated hide-and-seek environment, agents build a series of six distinct strategies and counterstrategies, some of which we did not know our environment supported. The self-supervised emergent complexity in this simple environment further suggests that multi-agent co-adaptation may one day produce extremely complex and intelligent behavior. + Research + Tue, 17 Sep 2019 07:00:00 +0000 + + + Solving Rubik’s Cube with a robot hand + https://openai.com/index/solving-rubiks-cube + We’ve trained a pair of neural networks to solve the Rubik’s Cube with a human-like robot hand. The neural networks are trained entirely in simulation, using the same reinforcement learning code as OpenAI Five paired with a new technique called Automatic Domain Randomization (ADR). The system can handle situations it never saw during training, such as being prodded by a stuffed giraffe. 
This shows that reinforcement learning isn’t just a tool for virtual tasks, but can solve physical-world problems requiring unprecedented dexterity. + Research + Tue, 15 Oct 2019 07:00:00 +0000 + + + GPT-2: 1.5B release + https://openai.com/index/gpt-2-1-5b-release + As the final model release of GPT-2’s staged release, we’re releasing the largest version (1.5B parameters) of GPT-2 along with code and model weights to facilitate detection of outputs of GPT-2 models. While there have been larger language models released since August, we’ve continued with our original staged release plan in order to provide the community with a test case of a full staged release process. We hope that this test case will be useful to developers of future powerful models, and we’re actively continuing the conversation with the AI community on responsible publication. + Research + Tue, 05 Nov 2019 08:00:00 +0000 + + + Procgen Benchmark + https://openai.com/index/procgen-benchmark + We’re releasing Procgen Benchmark, 16 simple-to-use procedurally-generated environments which provide a direct measure of how quickly a reinforcement learning agent learns generalizable skills. + Research + Tue, 03 Dec 2019 08:00:00 +0000 + + + Deep double descent + https://openai.com/index/deep-double-descent + We show that the double descent phenomenon occurs in CNNs, ResNets, and transformers: performance first improves, then gets worse, and then improves again with increasing model size, data size, or training time. This effect is often avoided through careful regularization. While this behavior appears to be fairly universal, we don’t yet fully understand why it happens, and view further study of this phenomenon as an important research direction. 
+ Research + Thu, 05 Dec 2019 08:00:00 +0000 + + + Dota 2 with large scale deep reinforcement learning + https://openai.com/index/dota-2-with-large-scale-deep-reinforcement-learning + Research + Fri, 13 Dec 2019 08:00:00 +0000 + + + Scaling laws for neural language models + https://openai.com/index/scaling-laws-for-neural-language-models + Research + Thu, 23 Jan 2020 08:00:00 +0000 + + + OpenAI Microscope + https://openai.com/index/microscope + We’re introducing OpenAI Microscope, a collection of visualizations of every significant layer and neuron of eight vision “model organisms” which are often studied in interpretability. Microscope makes it easier to analyze the features that form inside these neural networks, and we hope it will help the research community as we move towards understanding these complicated systems. + Research + Tue, 14 Apr 2020 07:00:00 +0000 + + + Improving verifiability in AI development + https://openai.com/index/improving-verifiability + We’ve contributed to a multi-stakeholder report by 58 co-authors at 30 organizations, including the Centre for the Future of Intelligence, Mila, Schwartz Reisman Institute for Technology and Society, Center for Advanced Study in the Behavioral Sciences, and Center for Security and Emerging Technologies. This report describes 10 mechanisms to improve the verifiability of claims made about AI systems. Developers can use these tools to provide evidence that AI systems are safe, secure, fair, or privacy-preserving. Users, policymakers, and civil society can use these tools to evaluate AI development processes. + Research + Thu, 16 Apr 2020 07:00:00 +0000 + + + Jukebox + https://openai.com/index/jukebox + We’re introducing Jukebox, a neural net that generates music, including rudimentary singing, as raw audio in a variety of genres and artist styles. We’re releasing the model weights and code, along with a tool to explore the generated samples. 
+ Research + Thu, 30 Apr 2020 07:00:00 +0000 + + + AI and efficiency + https://openai.com/index/ai-and-efficiency + We’re releasing an analysis showing that since 2012 the amount of compute needed to train a neural net to the same performance on ImageNet classification has been decreasing by a factor of 2 every 16 months. Compared to 2012, it now takes 44 times less compute to train a neural network to the level of AlexNet (by contrast, Moore’s Law would yield an 11x cost improvement over this period). Our results suggest that for AI tasks with high levels of recent investment, algorithmic progress has yielded more gains than classical hardware efficiency. + Research + Tue, 05 May 2020 07:00:00 +0000 + + + Language models are few-shot learners + https://openai.com/index/language-models-are-few-shot-learners + Research + Thu, 28 May 2020 07:00:00 +0000 + + + Image GPT + https://openai.com/index/image-gpt + We find that, just as a large transformer model trained on language can generate coherent text, the same exact model trained on pixel sequences can generate coherent image completions and samples. By establishing a correlation between sample quality and image classification accuracy, we show that our best generative model also contains features competitive with top convolutional nets in the unsupervised setting. + Research + Wed, 17 Jun 2020 07:00:00 +0000 + + + Generative language modeling for automated theorem proving + https://openai.com/index/generative-language-modeling-for-automated-theorem-proving + Research + Mon, 07 Sep 2020 07:00:00 +0000 + + + DALL·E: Creating images from text + https://openai.com/index/dall-e + We’ve trained a neural network called DALL·E that creates images from text captions for a wide range of concepts expressible in natural language. 
+ Research + Tue, 05 Jan 2021 08:00:00 +0000 + + + CLIP: Connecting text and images + https://openai.com/index/clip + We’re introducing a neural network called CLIP which efficiently learns visual concepts from natural language supervision. CLIP can be applied to any visual classification benchmark by simply providing the names of the visual categories to be recognized, similar to the “zero-shot” capabilities of GPT-2 and GPT-3. + Research + Tue, 05 Jan 2021 08:00:00 +0000 + + + Scaling Kubernetes to 7,500 nodes + https://openai.com/index/scaling-kubernetes-to-7500-nodes + We’ve scaled Kubernetes clusters to 7,500 nodes, producing a scalable infrastructure for large models like GPT-3, CLIP, and DALL·E, but also for rapid small-scale iterative research such as Scaling Laws for Neural Language Models. + Research + Mon, 25 Jan 2021 08:00:00 +0000 + + + Understanding the capabilities, limitations, and societal impact of large language models + https://openai.com/index/understanding-the-capabilities-limitations-and-societal-impact-of-large-language-models + Research + Thu, 04 Feb 2021 08:00:00 +0000 + + + Multimodal neurons in artificial neural networks + https://openai.com/index/multimodal-neurons + We’ve discovered neurons in CLIP that respond to the same concept whether presented literally, symbolically, or conceptually. This may explain CLIP’s accuracy in classifying surprising visual renditions of concepts, and is also an important step toward understanding the associations and biases that CLIP and similar models learn. 
+ Research + Thu, 04 Mar 2021 08:00:00 +0000 + + + Evaluating large language models trained on code + https://openai.com/index/evaluating-large-language-models-trained-on-code + Research + Wed, 07 Jul 2021 07:00:00 +0000 + + + Introducing Triton: Open-source GPU programming for neural networks + https://openai.com/index/triton + We’re releasing Triton 1.0, an open-source Python-like programming language which enables researchers with no CUDA experience to write highly efficient GPU code—most of the time on par with what an expert would be able to produce. + Research + Wed, 28 Jul 2021 07:00:00 +0000 + + + TruthfulQA: Measuring how models mimic human falsehoods + https://openai.com/index/truthfulqa + Research + Wed, 08 Sep 2021 07:00:00 +0000 + + + Solving math word problems + https://openai.com/index/solving-math-word-problems + We’ve trained a system that solves grade school math problems with nearly twice the accuracy of a fine-tuned GPT-3 model. It solves about 90% as many problems as real kids: a small sample of 9-12 year olds scored 60% on a test from our dataset, while our system scored 55% on those same problems. + Research + Fri, 29 Oct 2021 07:00:00 +0000 + + + WebGPT: Improving the factual accuracy of language models through web browsing + https://openai.com/index/webgpt + We’ve fine-tuned GPT-3 to more accurately answer open-ended questions using a text-based web browser. + Research + Thu, 16 Dec 2021 08:00:00 +0000 + + + Text and code embeddings by contrastive pre-training + https://openai.com/index/text-and-code-embeddings-by-contrastive-pre-training + Research + Mon, 24 Jan 2022 08:00:00 +0000 + + + Solving (some) formal math olympiad problems + https://openai.com/index/formal-math + We built a neural theorem prover for Lean that learned to solve a variety of challenging high-school olympiad problems, including problems from the AMC12 and AIME competitions, as well as two problems adapted from the IMO. 
+ Research + Wed, 02 Feb 2022 08:00:00 +0000 + + + A research agenda for assessing the economic impacts of code generation models + https://openai.com/index/economic-impacts-research + Research + Thu, 03 Mar 2022 08:00:00 +0000 + + + Hierarchical text-conditional image generation with CLIP latents + https://openai.com/index/hierarchical-text-conditional-image-generation-with-clip-latents + Research + Wed, 13 Apr 2022 07:00:00 +0000 + + + Teaching models to express their uncertainty in words + https://openai.com/index/teaching-models-to-express-their-uncertainty-in-words + Research + Sat, 28 May 2022 07:00:00 +0000 + + + Techniques for training large neural networks + https://openai.com/index/techniques-for-training-large-neural-networks + Large neural networks are at the core of many recent advances in AI, but training them is a difficult engineering and research challenge which requires orchestrating a cluster of GPUs to perform a single synchronized calculation. + Research + Thu, 09 Jun 2022 07:00:00 +0000 + + + Evolution through large models + https://openai.com/index/evolution-through-large-models + Research + Fri, 17 Jun 2022 07:00:00 +0000 + + + Learning to play Minecraft with Video PreTraining + https://openai.com/index/vpt + We trained a neural network to play Minecraft by Video PreTraining (VPT) on a massive unlabeled video dataset of human Minecraft play, while using only a small amount of labeled contractor data. With fine-tuning, our model can learn to craft diamond tools, a task that usually takes proficient humans over 20 minutes (24,000 actions). Our model uses the native human interface of keypresses and mouse movements, making it quite general, and represents a step towards general computer-using agents. 
+ Research + Thu, 23 Jun 2022 07:00:00 +0000 + + + DALL·E 2 pre-training mitigations + https://openai.com/index/dall-e-2-pre-training-mitigations + In order to share the magic of DALL·E 2 with a broad audience, we needed to reduce the risks associated with powerful image generation models. To this end, we put various guardrails in place to prevent generated images from violating our content policy. + Research + Tue, 28 Jun 2022 07:00:00 +0000 + + + Efficient training of language models to fill in the middle + https://openai.com/index/efficient-training-of-language-models-to-fill-in-the-middle + Research + Thu, 28 Jul 2022 07:00:00 +0000 + + + Introducing Whisper + https://openai.com/index/whisper + We’ve trained and are open-sourcing a neural net called Whisper that approaches human level robustness and accuracy on English speech recognition. + Research + Wed, 21 Sep 2022 07:00:00 +0000 + + + Scaling laws for reward model overoptimization + https://openai.com/index/scaling-laws-for-reward-model-overoptimization + Research + Wed, 19 Oct 2022 07:00:00 +0000 + + + Point-E: A system for generating 3D point clouds from complex prompts + https://openai.com/index/point-e + Research + Fri, 16 Dec 2022 08:00:00 +0000 + + + GPT-4 + https://openai.com/index/gpt-4-research + We’ve created GPT-4, the latest milestone in OpenAI’s effort in scaling up deep learning. GPT-4 is a large multimodal model (accepting image and text inputs, emitting text outputs) that, while less capable than humans in many real-world scenarios, exhibits human-level performance on various professional and academic benchmarks. 
+ Research + Tue, 14 Mar 2023 07:00:00 +0000 + + + GPTs are GPTs: An early look at the labor market impact potential of large language models + https://openai.com/index/gpts-are-gpts + Research + Fri, 17 Mar 2023 07:00:00 +0000 + + + Democratic inputs to AI + https://openai.com/index/democratic-inputs-to-ai + Our nonprofit organization, OpenAI, Inc., is launching a program to award ten $100,000 grants to fund experiments in setting up a democratic process for deciding what rules AI systems should follow, within the bounds defined by the law. + Research + Thu, 25 May 2023 07:00:00 +0000 + + + Improving mathematical reasoning with process supervision + https://openai.com/index/improving-mathematical-reasoning-with-process-supervision + We've trained a model to achieve a new state-of-the-art in mathematical problem solving by rewarding each correct step of reasoning (“process supervision”) instead of simply rewarding the correct final answer (“outcome supervision”). In addition to boosting performance relative to outcome supervision, process supervision also has an important alignment benefit: it directly trains the model to produce a chain-of-thought that is endorsed by humans. + Research + Wed, 31 May 2023 07:00:00 +0000 + + + Building an early warning system for LLM-aided biological threat creation + https://openai.com/index/building-an-early-warning-system-for-llm-aided-biological-threat-creation + We’re developing a blueprint for evaluating the risk that a large language model (LLM) could aid someone in creating a biological threat. In an evaluation involving both biology experts and students, we found that GPT-4 provides at most a mild uplift in biological threat creation accuracy. While this uplift is not large enough to be conclusive, our finding is a starting point for continued research and community deliberation. 
+ Research + Wed, 31 Jan 2024 08:00:00 +0000 + + + Video generation models as world simulators + https://openai.com/index/video-generation-models-as-world-simulators + We explore large-scale training of generative models on video data. Specifically, we train text-conditional diffusion models jointly on videos and images of variable durations, resolutions and aspect ratios. We leverage a transformer architecture that operates on spacetime patches of video and image latent codes. Our largest model, Sora, is capable of generating a minute of high fidelity video. Our results suggest that scaling video generation models is a promising path towards building general purpose simulators of the physical world. + Research + Thu, 15 Feb 2024 08:00:00 +0000 + + + The Instruction Hierarchy: Training LLMs to Prioritize Privileged Instructions + https://openai.com/index/the-instruction-hierarchy + Today's LLMs are susceptible to prompt injections, jailbreaks, and other attacks that allow adversaries to overwrite a model's original instructions with their own malicious prompts. + Research + Fri, 19 Apr 2024 19:00:00 +0000 + + + Understanding the source of what we see and hear online + https://openai.com/index/understanding-the-source-of-what-we-see-and-hear-online + Today we’re introducing new technology to help researchers identify content created by our tools and joining the Coalition for Content Provenance and Authenticity Steering Committee to promote industry standards. + Research + Tue, 07 May 2024 00:00:00 +0000 + + + Hello GPT-4o + https://openai.com/index/hello-gpt-4o + We’re announcing GPT-4 Omni, our new flagship model which can reason across audio, vision, and text in real time. + Research + Mon, 13 May 2024 10:05:00 +0000 + + + Extracting Concepts from GPT-4 + https://openai.com/index/extracting-concepts-from-gpt-4 + Using new techniques for scaling sparse autoencoders, we automatically identified 16 million patterns in GPT-4's computations. 
+ Research + Thu, 06 Jun 2024 00:00:00 +0000 + + + Consistency Models + https://openai.com/index/consistency-models + Diffusion models have significantly advanced the fields of image, audio, and video generation, but they depend on an iterative sampling process that causes slow generation. + Research + Thu, 20 Jun 2024 00:00:00 +0000 + + + A Holistic Approach to Undesired Content Detection in the Real World + https://openai.com/index/a-holistic-approach-to-undesired-content-detection-in-the-real-world + We present a holistic approach to building a robust and useful natural language classification system for real-world content moderation. + Research + Thu, 20 Jun 2024 00:00:00 +0000 + + + Improved Techniques for Training Consistency Models + https://openai.com/index/improved-techniques-for-training-consistency-models + Consistency models are a nascent family of generative models that can sample high quality data in one step without the need for adversarial training. + Research + Thu, 20 Jun 2024 00:00:00 +0000 + + + OpenAI and Los Alamos National Laboratory announce research partnership + https://openai.com/index/openai-and-los-alamos-national-laboratory-work-together + OpenAI and Los Alamos National Laboratory are working to develop safety evaluations to assess and measure biological capabilities and risks associated with frontier models. + Research + Wed, 10 Jul 2024 06:30:00 +0000 + + + Prover-Verifier Games improve legibility of language model outputs + https://openai.com/index/prover-verifier-games-improve-legibility + Discover how prover-verifier games improve the legibility of language model outputs, making AI solutions clearer, easier to verify, and more trustworthy for both humans and machines. 
+ Research + Wed, 17 Jul 2024 10:00:00 +0000 + + + GPT-4o mini: advancing cost-efficient intelligence + https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence + Introducing the most cost-efficient small model in the market + Research + Thu, 18 Jul 2024 10:00:00 +0000 + + + Improving Model Safety Behavior with Rule-Based Rewards + https://openai.com/index/improving-model-safety-behavior-with-rule-based-rewards + We've developed and applied a new method leveraging Rule-Based Rewards (RBRs) that aligns models to behave safely without extensive human data collection. + Research + Wed, 24 Jul 2024 09:00:00 +0000 + + + GPT-4o System Card External Testers Acknowledgements + https://openai.com/index/gpt-4o-system-card/external-testers-acknowledgements + GPT-4o system card external testers acknowledgements + Research + Thu, 08 Aug 2024 10:00:00 +0000 + + + Introducing SWE-bench Verified + https://openai.com/index/introducing-swe-bench-verified + We’re releasing a human-validated subset of SWE-bench that more reliably evaluates AI models’ ability to solve real-world software issues. + Research + Tue, 13 Aug 2024 10:00:00 +0000 + + + OpenAI o1 System Card External Testers Acknowledgements + https://openai.com/index/openai-o1-system-card/external-testers-acknowledgements + OpenAI o1 system card external testers acknowledgements + Research + Thu, 12 Sep 2024 10:00:00 +0000 + + + OpenAI o1-mini + https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning + Advancing cost-efficient reasoning + Research + Thu, 12 Sep 2024 10:01:00 +0000 + + + Learning to reason with LLMs + https://openai.com/index/learning-to-reason-with-llms + We are introducing OpenAI o1, a new large language model trained with reinforcement learning to perform complex reasoning. o1 thinks before it answers—it can produce a long internal chain of thought before responding to the user. 
+ Research + Thu, 12 Sep 2024 10:02:00 +0000 + + + MLE-bench: Evaluating Machine Learning Agents on Machine Learning Engineering + https://openai.com/index/mle-bench + We introduce MLE-bench, a benchmark for measuring how well AI agents perform at machine learning engineering. + Research + Thu, 10 Oct 2024 10:00:00 +0000 + + + Evaluating fairness in ChatGPT + https://openai.com/index/evaluating-fairness-in-chatgpt + We've analyzed how ChatGPT responds to users based on their name, using AI research assistants to protect privacy. + Research + Tue, 15 Oct 2024 10:00:00 +0000 + + + Simplifying, stabilizing, and scaling continuous-time consistency models + https://openai.com/index/simplifying-stabilizing-and-scaling-continuous-time-consistency-models + We’ve simplified, stabilized, and scaled continuous-time consistency models, achieving comparable sample quality to leading diffusion models, while using only two sampling steps. + Research + Wed, 23 Oct 2024 10:00:00 +0000 + + + Introducing SimpleQA + https://openai.com/index/introducing-simpleqa + A factuality benchmark called SimpleQA that measures the ability for language models to answer short, fact-seeking questions. + Research + Wed, 30 Oct 2024 10:00:00 +0000 + + + Advancing red teaming with people and AI + https://openai.com/index/advancing-red-teaming-with-people-and-ai + Advancing red teaming with people and AI + Research + Thu, 21 Nov 2024 10:30:00 +0000 + + + OpenAI o1 System Card + https://openai.com/index/openai-o1-system-card + This report outlines the safety work carried out prior to releasing OpenAI o1 and o1-mini, including external red teaming and frontier risk evaluations according to our Preparedness Framework. 
+ Research + Thu, 05 Dec 2024 10:00:00 +0000 + + + Trading inference-time compute for adversarial robustness + https://openai.com/index/trading-inference-time-compute-for-adversarial-robustness + Trading Inference-Time Compute for Adversarial Robustness + Research + Wed, 22 Jan 2025 10:00:00 +0000 + + + Computer-Using Agent + https://openai.com/index/computer-using-agent + A universal interface for AI to interact with the digital world. + Research + Thu, 23 Jan 2025 10:00:00 +0000 + + + OpenAI o3-mini + https://openai.com/index/openai-o3-mini + Pushing the frontier of cost-effective reasoning. + Research + Fri, 31 Jan 2025 11:00:00 +0000 + + + OpenAI o3-mini System Card + https://openai.com/index/o3-mini-system-card + This report outlines the safety work carried out for the OpenAI o3-mini model, including safety evaluations, external red teaming, and Preparedness Framework evaluations. + Research + Fri, 31 Jan 2025 11:00:00 +0000 + + + Introducing deep research + https://openai.com/index/introducing-deep-research + An agent that uses reasoning to synthesize large amounts of online information and complete multi-step research tasks for you. Available to Pro users today, Plus and Team next. + Research + Sun, 02 Feb 2025 16:00:00 +0000 + + + Why language models hallucinate + https://openai.com/index/why-language-models-hallucinate + OpenAI’s new research explains why language models hallucinate. The findings show how improved evaluations can enhance AI reliability, honesty, and safety. + Research + Fri, 05 Sep 2025 10:00:00 +0000 + + + How people are using ChatGPT + https://openai.com/index/how-people-are-using-chatgpt + New research from the largest study of ChatGPT use shows how the tool creates economic value through both personal and professional use. Adoption is broadening beyond early users, closing gaps and making AI a part of everyday life. 
+ Research + Mon, 15 Sep 2025 03:00:00 +0000 + + + Sora 2 is here + https://openai.com/index/sora-2 + Our latest video generation model is more physically accurate, realistic, and controllable than prior systems. It also features synchronized dialogue and sound effects. Create with it in the new Sora app. + Research + Tue, 30 Sep 2025 00:00:00 +0000 + + + Defining and evaluating political bias in LLMs + https://openai.com/index/defining-and-evaluating-political-bias-in-llms + Learn how OpenAI evaluates political bias in ChatGPT through new real-world testing methods that improve objectivity and reduce bias. + Research + Thu, 09 Oct 2025 13:00:00 +0000 + + + Introducing IndQA + https://openai.com/index/introducing-indqa + OpenAI introduces IndQA, a new benchmark for evaluating AI systems in Indian languages. Built with domain experts, IndQA tests cultural understanding and reasoning across 12 languages and 10 knowledge areas. + Research + Mon, 03 Nov 2025 22:30:00 +0000 + + + Understanding neural networks through sparse circuits + https://openai.com/index/understanding-neural-networks-through-sparse-circuits + OpenAI is exploring mechanistic interpretability to understand how neural networks reason. Our new sparse model approach could make AI systems more transparent and support safer, more reliable behavior. + Research + Thu, 13 Nov 2025 10:00:00 +0000 + + + Early experiments in accelerating science with GPT-5 + https://openai.com/index/accelerating-science-gpt-5 + OpenAI introduces the first research cases showing how GPT-5 accelerates scientific progress across math, physics, biology, and computer science. Explore how AI and researchers collaborate to generate proofs, uncover new insights, and reshape the pace of discovery. 
+ Research + Thu, 20 Nov 2025 00:00:00 +0000 + + + How confessions can keep language models honest + https://openai.com/index/how-confessions-can-keep-language-models-honest + OpenAI researchers are testing “confessions,” a method that trains models to admit when they make mistakes or act undesirably, helping improve AI honesty, transparency, and trust in model outputs. + Research + Wed, 03 Dec 2025 10:00:00 +0000 + + + Measuring AI’s capability to accelerate biological research + https://openai.com/index/accelerating-biological-research-in-the-wet-lab + OpenAI introduces a real-world evaluation framework to measure how AI can accelerate biological research in the wet lab. Using GPT-5 to optimize a molecular cloning protocol, the work explores both the promise and risks of AI-assisted experimentation. + Research + Tue, 16 Dec 2025 08:00:00 +0000 + + + Evaluating AI’s ability to perform scientific research tasks + https://openai.com/index/frontierscience + OpenAI introduces FrontierScience, a benchmark testing AI reasoning in physics, chemistry, and biology to measure progress toward real scientific research. + Research + Tue, 16 Dec 2025 09:00:00 +0000 + + + Evaluating chain-of-thought monitorability + https://openai.com/index/evaluating-chain-of-thought-monitorability + OpenAI introduces a new framework and evaluation suite for chain-of-thought monitorability, covering 13 evaluations across 24 environments. Our findings show that monitoring a model’s internal reasoning is far more effective than monitoring outputs alone, offering a promising path toward scalable control as AI systems grow more capable. + Research + Thu, 18 Dec 2025 12:00:00 +0000 + + + GPT-5 lowers the cost of cell-free protein synthesis + https://openai.com/index/gpt-5-lowers-protein-synthesis-cost + An autonomous lab combining OpenAI’s GPT-5 with Ginkgo Bioworks’ cloud automation cut cell-free protein synthesis costs by 40% through closed-loop experimentation. 
+ Research + Thu, 05 Feb 2026 11:00:00 +0000 + + + GPT-5.2 derives a new result in theoretical physics + https://openai.com/index/new-result-theoretical-physics + A new preprint shows GPT-5.2 proposing a new formula for a gluon amplitude, later formally proved and verified by OpenAI and academic collaborators. + Research + Fri, 13 Feb 2026 11:00:00 +0000 + + + Introducing EVMbench + https://openai.com/index/introducing-evmbench + OpenAI and Paradigm introduce EVMbench, a benchmark evaluating AI agents’ ability to detect, patch, and exploit high-severity smart contract vulnerabilities. + Research + Wed, 18 Feb 2026 00:00:00 +0000 + + + Our First Proof submissions + https://openai.com/index/first-proof-submissions + We share our AI model’s proof attempts for the First Proof math challenge, testing research-grade reasoning on expert-level problems. + Research + Fri, 20 Feb 2026 14:30:00 +0000 + + + Why we no longer evaluate SWE-bench Verified + https://openai.com/index/why-we-no-longer-evaluate-swe-bench-verified + SWE-bench Verified is increasingly contaminated and mismeasures frontier coding progress. Our analysis shows flawed tests and training leakage. We recommend SWE-bench Pro. + Research + Mon, 23 Feb 2026 11:00:00 +0000 + + + Extending single-minus amplitudes to gravitons + https://openai.com/index/extending-single-minus-amplitudes-to-gravitons + A new preprint extends single-minus amplitudes to gravitons, with GPT-5.2 Pro helping derive and verify nonzero graviton tree amplitudes in quantum gravity. + Research + Wed, 04 Mar 2026 10:00:00 +0000 + + + Reasoning models struggle to control their chains of thought, and that’s good + https://openai.com/index/reasoning-models-chain-of-thought-controllability + OpenAI introduces CoT-Control and finds reasoning models struggle to control their chains of thought, reinforcing monitorability as an AI safety safeguard. 
+ Research + Thu, 05 Mar 2026 10:00:00 +0000 + + + Improving instruction hierarchy in frontier LLMs + https://openai.com/index/instruction-hierarchy-challenge + IH-Challenge trains models to prioritize trusted instructions, improving instruction hierarchy, safety steerability, and resistance to prompt injection attacks. + Research + Tue, 10 Mar 2026 11:00:00 +0000 +