Compare commits

...

80 Commits

Author SHA1 Message Date
Paul Gauthier
8159cbf7d3 set version to 0.82.4.dev 2025-05-05 20:23:46 -07:00
Paul Gauthier
c23e609902 version bump to 0.82.3 2025-05-05 20:23:43 -07:00
Paul Gauthier
2d9ea25273 fix: Add newline after "Reply in {user_lang}" reminder 2025-05-05 19:17:36 -07:00
Paul Gauthier
7773bbc908 copy 2025-05-05 19:13:49 -07:00
Paul Gauthier
72476f0967 bump deps 2025-05-05 19:05:11 -07:00
Paul Gauthier
a9883ccc25 copy 2025-05-05 19:03:32 -07:00
Paul Gauthier
3b9b93a8a4 Merge branch 'main' of github.com:Aider-AI/aider 2025-05-05 19:03:07 -07:00
Paul Gauthier
f90b7bfb09 better 2025-05-05 19:02:43 -07:00
paul-gauthier
5e7ef6c50e Merge pull request #3931 from elohmeier/main 2025-05-02 07:21:29 -07:00
Enno Richter
fdc7be1318 docs: update lint/test pre-commit shell script example to use "$@" for proper argument handling 2025-05-02 05:49:02 +02:00
paul-gauthier
f00c1bf61b Merge pull request #3930 from erykwieliczko/main 2025-05-01 20:03:17 -07:00
Eryk Wieliczko
09030de0b5 Show dates on "Release history" chart 2025-05-02 04:38:38 +02:00
Paul Gauthier
bdba0ca1c5 refactor: Move shell command prompts to shell.py, add final reminders 2025-05-01 17:31:22 -07:00
Paul Gauthier (aider)
e17c7d938c refactor: add normalize_language and improve get_user_language to return readable names 2025-05-01 17:29:14 -07:00
Paul Gauthier (aider)
433f2908a0 feat: add language normalization to convert locale codes to names using Babel or fallback map 2025-05-01 17:28:56 -07:00
Paul Gauthier
9fa5f5ace1 refactor: remove redundant dump call from prompt formatting in base_coder.py 2025-05-01 17:28:55 -07:00
Paul Gauthier
849a379a8c refactor: Move lazy/overeager prompts to final reminders in system prompt 2025-05-01 17:24:14 -07:00
Paul Gauthier
e205629a94 Merge branch 'main' of github.com:Aider-AI/aider 2025-04-28 07:53:31 -07:00
Paul Gauthier
9351f37935 add google-generativeai, bump deps 2025-04-28 07:53:20 -07:00
paul-gauthier
7d185bb710 Merge pull request #3898 from acro5piano/main
fix: add suffix "md" when opening editor in InputOutput class
2025-04-27 06:59:11 -07:00
Kay Gosho
07759813ed fix: add suffix "md" when opening editor in InputOutput class 2025-04-27 16:06:32 +09:00
Paul Gauthier
591d294052 feat: Print changed file path and check before reading content 2025-04-25 07:58:51 -07:00
Paul Gauthier (aider)
df1a0c5b8d style: Apply linter to aider/watch.py 2025-04-25 07:54:35 -07:00
Paul Gauthier (aider)
e743394537 feat: Skip scanning files larger than 1MB for AI comments 2025-04-25 07:54:32 -07:00
Paul Gauthier
22f140ac05 feat: Add common file patterns to default gitignore spec 2025-04-25 07:54:31 -07:00
Paul Gauthier (aider)
25a303935c fix: Add overeager setting to Gemini 2.5 Pro models in model-settings.yml 2025-04-24 19:51:35 -07:00
Paul Gauthier (aider)
3bf20d4f7a feat: Set gemini 2.5 pro models to overeager: true 2025-04-24 19:51:14 -07:00
Paul Gauthier
45413ce815 copy 2025-04-24 07:18:35 -07:00
Paul Gauthier
c56e4a08d3 copy 2025-04-20 19:53:32 -07:00
Paul Gauthier (aider)
80515b69c1 fix: Strip b/ prefix when a path is /dev/null in diffs 2025-04-20 17:34:36 -07:00
Paul Gauthier
303645cffa copy 2025-04-20 17:30:06 -07:00
Paul Gauthier
b3d32f65d3 fixed quote 2025-04-20 17:29:42 -07:00
Paul Gauthier
7c0aac7454 chore: Update Gemini flash model alias to preview version 2025-04-20 16:43:35 -07:00
Paul Gauthier
7719eae023 copy 2025-04-20 16:41:58 -07:00
Paul Gauthier
5e210c700d fix: Handle filenames starting with fences or triple backticks correctly 2025-04-20 16:36:36 -07:00
Paul Gauthier (aider)
c6ce871700 style: Apply linter to editblock_coder.py 2025-04-20 16:25:59 -07:00
Paul Gauthier (aider)
f28504a2eb fix: Properly handle filenames starting with triple backticks 2025-04-20 16:25:55 -07:00
Paul Gauthier
48733a315b fix: Handle filenames starting with fence chars in editblock coder 2025-04-20 16:25:54 -07:00
Paul Gauthier (aider)
16fbff8de1 Feat: Add gemini-2.5-flash-preview-04-17 as editor/weak model name 2025-04-20 16:04:04 -07:00
Paul Gauthier (aider)
bbab0cea5e feat: Add model settings for gemini-2.5-flash-preview-04-17 models 2025-04-20 16:01:03 -07:00
Paul Gauthier
19de93ae39 fix: Update weak model name for gemini-2.5-pro-exp-03-25 2025-04-20 15:58:54 -07:00
Paul Gauthier
230e5065c1 feat: Add gemini-2.5-flash-preview-04-17 model and leaderboard entry 2025-04-20 15:47:34 -07:00
Paul Gauthier
c94340d493 less ram 2025-04-20 13:18:57 -07:00
Paul Gauthier
ac1ff231e0 better prompt 2025-04-20 12:38:09 -07:00
Paul Gauthier (aider)
5423ffe518 feat: Add UnifiedDiffSimpleCoder to coders module 2025-04-20 11:34:10 -07:00
Paul Gauthier (aider)
ba4d613cbc feat: Give udiff-simple its own prompts, inheriting from udiff prompts 2025-04-20 11:32:42 -07:00
Paul Gauthier
ab11118c8a feat: Add simple prompts for unified diff coder 2025-04-20 11:32:41 -07:00
Paul Gauthier (aider)
3ca3f39f1d feat: Add UnifiedDiffSimpleCoder with simpler prompt for code edits 2025-04-20 11:31:55 -07:00
Paul Gauthier
8c3f167e8c feat: Add simple unified diff coder 2025-04-20 11:31:55 -07:00
Paul Gauthier (aider)
1a4d3927e7 feat: Add --thinking-tokens option to benchmark script 2025-04-20 11:29:33 -07:00
Paul Gauthier
20a29e5cd1 copy 2025-04-20 11:12:54 -07:00
Paul Gauthier
51e0fff822 cleanup metadata that is now in litellm 2025-04-20 11:12:20 -07:00
Paul Gauthier (aider)
13b3e75d0e style: Apply linting to clean_metadata.py 2025-04-20 11:11:01 -07:00
Paul Gauthier (aider)
de28178369 feat: Add dummy key to JSON dumps for diffing, then remove it. 2025-04-20 11:10:58 -07:00
Paul Gauthier (aider)
2f38cd184c style: Format clean_metadata.py with black linter 2025-04-20 11:08:57 -07:00
Paul Gauthier (aider)
d8caa76bc8 feat: Compare dicts directly to avoid spurious diffs in metadata cleaning 2025-04-20 11:08:53 -07:00
Paul Gauthier (aider)
506c3c928e refactor: Remove unused variable in find_block_lines function 2025-04-20 11:06:20 -07:00
Paul Gauthier (aider)
48ac1de8d3 fix: Remove unused variables and fix whitespace in clean_metadata.py 2025-04-20 11:05:56 -07:00
Paul Gauthier (aider)
ebfce5b0f2 style: Run linter on clean_metadata.py 2025-04-20 11:05:39 -07:00
Paul Gauthier (aider)
58f4db4e52 feat: Implement surgical removal of JSON blocks from metadata file 2025-04-20 11:05:36 -07:00
Paul Gauthier
ba2c4d1eb7 chore: Make clean_metadata.py executable 2025-04-20 11:05:35 -07:00
Paul Gauthier (aider)
6656b5d973 style: Apply linting to clean_metadata.py 2025-04-20 11:03:44 -07:00
Paul Gauthier (aider)
b4673fdc85 Refactor: Remove keys immediately in clean_metadata.py 2025-04-20 11:03:41 -07:00
Paul Gauthier (aider)
ce1266be68 style: Run linter on clean_metadata.py 2025-04-20 11:02:40 -07:00
Paul Gauthier (aider)
226108d05d feat: Prompt user to remove entries from model-metadata.json 2025-04-20 11:02:37 -07:00
Paul Gauthier (aider)
b2d541f1eb style: Fix line length in clean_metadata.py for flake8 compliance 2025-04-20 11:02:00 -07:00
Paul Gauthier (aider)
758020c574 fix: Flip diff order to be from aider -> litellm in script 2025-04-20 11:01:40 -07:00
Paul Gauthier (aider)
876569613b style: Apply linting to clean_metadata.py 2025-04-20 11:00:52 -07:00
Paul Gauthier (aider)
82b26daf37 feat: display matching entries side-by-side with diff highlighting 2025-04-20 11:00:49 -07:00
Paul Gauthier (aider)
be44b65095 style: Apply linting to clean_metadata.py 2025-04-20 11:00:03 -07:00
Paul Gauthier (aider)
8596f0d4a3 feat: Use json5 to load model metadata for lenient parsing 2025-04-20 11:00:00 -07:00
Paul Gauthier
19a94e5f15 fix: Update litellm_path to correctly locate JSON file 2025-04-20 10:59:59 -07:00
Paul Gauthier (aider)
7bde345b83 feat: Add script to find common models in metadata files. 2025-04-20 10:58:38 -07:00
Paul Gauthier
d45a5747ea feat: Add script to clean metadata from files 2025-04-20 10:58:37 -07:00
Paul Gauthier
e560ab61b6 copy 2025-04-20 10:48:39 -07:00
Paul Gauthier
84c3ac93ef copy 2025-04-20 10:37:14 -07:00
Paul Gauthier (aider)
7a50b7779a docs: Update boto3 install instructions for Bedrock 2025-04-20 10:34:13 -07:00
Paul Gauthier
328a3c3178 docs: Improve Amazon Bedrock setup instructions 2025-04-20 10:34:12 -07:00
Paul Gauthier (aider)
21fa54d792 Refactor: Update LLM docs to match gemini.md structure 2025-04-20 10:28:01 -07:00
Paul Gauthier
ec7ac60cfc copy 2025-04-20 08:54:44 -07:00
64 changed files with 1701 additions and 1187 deletions

View File

@@ -1,6 +1,27 @@
# Release history
### main branch
- Add support for `gemini-2.5-flash-preview-04-17` models.
- Improved robustness of edit block parsing when filenames start with backticks or fences.
- Add new `udiff-simple` edit format, for Gemini 2.5 Pro.
- Update default weak/editor models for Gemini 2.5 Pro models to use `gemini-2.5-flash-preview-04-17`.
- Instruct models to reply in the user's detected system language.
- Fix parsing of diffs for newly created files (`--- /dev/null`).
- Add markdown syntax highlighting support when editing multi-line commit messages via `/commit`, by Kay Gosho.
- Set Gemini 2.5 Pro models to use the `overeager` prompt setting by default.
- Add common file types (`.svg`, `.pdf`) and IDE directories (`.idea/`, `.vscode/`, etc.) to the default list of ignored files for AI comment scanning (`--watch`).
- Skip scanning files larger than 1MB for AI comments (`--watch`).
- Aider wrote 67% of the code in this release.
### Aider v0.82.2
- Fix editing shell files with diff-fenced, by zjy1412.
- Improve robustness of patch application by allowing multiple update/delete actions for the same file within a single response.
- Update prompts to instruct LLMs to consolidate all edits for a given file into a single block within the patch.
### Aider v0.82.1
- Added support for `o3` and `o4-mini` including provider-specific versions for OpenAI, OpenRouter, and Azure.
- Added support for Azure specific `gpt-4.1` and `gpt-4.1-mini` models.
- Disabled streaming for `o3` models since you need identity verification to stream.

View File

@@ -27,7 +27,7 @@ cog.out(text)
<a href="https://github.com/Aider-AI/aider/stargazers"><img alt="GitHub Stars" title="Total number of GitHub stars the Aider project has received"
src="https://img.shields.io/github/stars/Aider-AI/aider?style=flat-square&logo=github&color=f1c40f&labelColor=555555"/></a>
<a href="https://pypi.org/project/aider-chat/"><img alt="PyPI Downloads" title="Total number of installations via pip from PyPI"
src="https://img.shields.io/badge/📦%20Installs-2.0M-2ecc71?style=flat-square&labelColor=555555"/></a>
src="https://img.shields.io/badge/📦%20Installs-2.1M-2ecc71?style=flat-square&labelColor=555555"/></a>
<img alt="Tokens per week" title="Number of tokens processed weekly by Aider users"
src="https://img.shields.io/badge/📈%20Tokens%2Fweek-15B-3498db?style=flat-square&labelColor=555555"/>
<a href="https://openrouter.ai/#options-menu"><img alt="OpenRouter Ranking" title="Aider's ranking among applications on the OpenRouter platform"
@@ -140,7 +140,7 @@ See the [installation instructions](https://aider.chat/docs/install.html) and [u
## Kind Words From Users
- *"My life has changed this week. There's finally an AI coding tool that's good enough to keep up with me... Aider... It's going to rock your world."* — [Eric S. Raymond](https://x.com/esrtweet/status/1910809356381413593)
- *"My life has changed... There's finally an AI coding tool that's good enough to keep up with me... Aider... It's going to rock your world."* — [Eric S. Raymond](https://x.com/esrtweet/status/1910809356381413593)
- *"The best free open source AI coding assistant."* — [IndyDevDan](https://youtu.be/YALpX8oOn78)
- *"The best AI coding assistant so far."* — [Matthew Berman](https://www.youtube.com/watch?v=df8afeb1FY8)
- *"Aider ... has easily quadrupled my coding productivity."* — [SOLAR_FIELDS](https://news.ycombinator.com/item?id=36212100)
@@ -168,7 +168,7 @@ See the [installation instructions](https://aider.chat/docs/install.html) and [u
- *"Aider is also my best friend."* — [jzn21](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27dcnb/)
- *"Try Aider, it's worth it."* — [jorgejhms](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27cp99/)
- *"I like aider :)"* — [Chenwei Cui](https://x.com/ccui42/status/1904965344999145698)
- *"Aider is the precision tool of LLM code gen. It is minimal, thoughtful and capable of surgical changes to your codebase all while keeping the developer in control."* — [Reilly Sweetland](https://x.com/rsweetland/status/1904963807237259586)
- *"Aider is the precision tool of LLM code gen... Minimal, thoughtful and capable of surgical changes to your codebase all while keeping the developer in control."* — [Reilly Sweetland](https://x.com/rsweetland/status/1904963807237259586)
- *"Cannot believe aider vibe coded a 650 LOC feature across service and cli today in 1 shot."* - [autopoietist](https://discord.com/channels/1131200896827654144/1131200896827654149/1355675042259796101)
- *"Oh no the secret is out! Yes, Aider is the best coding tool around. I highly, highly recommend it to anyone."* — [Joshua D Vander Hook](https://x.com/jodavaho/status/1911154899057795218)
- *"thanks to aider, i have started and finished three personal projects within the last two days"* — [joseph stalzyn](https://x.com/anitaheeder/status/1908338609645904160)

View File

@@ -1,6 +1,6 @@
from packaging import version
__version__ = "0.82.3.dev"
__version__ = "0.82.4.dev"
safe_version = __version__
try:

View File

@@ -10,6 +10,7 @@ from .editor_whole_coder import EditorWholeFileCoder
from .help_coder import HelpCoder
from .patch_coder import PatchCoder
from .udiff_coder import UnifiedDiffCoder
from .udiff_simple import UnifiedDiffSimpleCoder
from .wholefile_coder import WholeFileCoder
# from .single_wholefile_func_coder import SingleWholeFileFunctionCoder
@@ -23,6 +24,7 @@ __all__ = [
WholeFileCoder,
PatchCoder,
UnifiedDiffCoder,
UnifiedDiffSimpleCoder,
# SingleWholeFileFunctionCoder,
ArchitectCoder,
EditorEditBlockCoder,

View File

@@ -32,4 +32,4 @@ Here are summaries of some files present in my git repo.
If you need to see the full contents of any files to answer my questions, ask me to *add them to the chat*.
"""
system_reminder = ""
system_reminder = "{final_reminders}"

View File

@@ -15,6 +15,13 @@ import time
import traceback
from collections import defaultdict
from datetime import datetime
# Optional dependency: used to convert locale codes (eg ``en_US``)
# into human-readable language names (eg ``English``).
try:
from babel import Locale # type: ignore
except ImportError: # Babel not installed; we will fall back to a small mapping
Locale = None
from json.decoder import JSONDecodeError
from pathlib import Path
from typing import List
@@ -1011,23 +1018,75 @@ class Coder:
]
self.cur_messages = []
def get_user_language(self):
if self.chat_language:
return self.chat_language
def normalize_language(self, lang_code):
"""
Convert a locale code such as ``en_US`` or ``fr`` into a readable
language name (e.g. ``English`` or ``French``). If Babel is
available it is used for reliable conversion; otherwise a small
built-in fallback map handles common languages.
"""
if not lang_code:
return None
# Probably already a language name
if (
len(lang_code) > 3
and "_" not in lang_code
and "-" not in lang_code
and lang_code[0].isupper()
):
return lang_code
# Preferred: Babel
if Locale is not None:
try:
loc = Locale.parse(lang_code.replace("-", "_"))
return loc.get_display_name("en").capitalize()
except Exception:
pass # Fall back to manual mapping
# Simple fallback for common languages
fallback = {
"en": "English",
"fr": "French",
"es": "Spanish",
"de": "German",
"it": "Italian",
"pt": "Portuguese",
"zh": "Chinese",
"ja": "Japanese",
"ko": "Korean",
"ru": "Russian",
}
return fallback.get(lang_code.split("_")[0].lower(), lang_code)
def get_user_language(self):
"""
Detect the user's language preference and return a human-readable
language name such as ``English``. Detection order:
1. ``self.chat_language`` if explicitly set
2. ``locale.getlocale()``
3. ``LANG`` / ``LANGUAGE`` / ``LC_ALL`` / ``LC_MESSAGES`` environment variables
"""
# Explicit override
if self.chat_language:
return self.normalize_language(self.chat_language)
# System locale
try:
lang = locale.getlocale()[0]
if lang:
return lang # Return the full language code, including country
return self.normalize_language(lang)
except Exception:
pass
pass # pragma: no cover
for env_var in ["LANG", "LANGUAGE", "LC_ALL", "LC_MESSAGES"]:
# Environment variables
for env_var in ("LANG", "LANGUAGE", "LC_ALL", "LC_MESSAGES"):
lang = os.environ.get(env_var)
if lang:
return lang.split(".")[
0
] # Return language and country, but remove encoding if present
lang = lang.split(".")[0] # Strip encoding if present
return self.normalize_language(lang)
return None
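
A minimal sketch of the Babel path used by `normalize_language` above (assumes the optional `babel` package is installed; the built-in fallback map handles common languages without it):

```python
from babel import Locale

# Locale codes are normalized with replace("-", "_") before parsing.
loc = Locale.parse("en_US")
print(loc.get_display_name("en"))  # "English (United States)"
print(Locale.parse("fr").get_display_name("en").capitalize())  # "French"
```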
@@ -1079,12 +1138,15 @@ class Coder:
return platform_text
def fmt_system_prompt(self, prompt):
final_reminders = []
if self.main_model.lazy:
lazy_prompt = self.gpt_prompts.lazy_prompt
elif self.main_model.overeager:
lazy_prompt = self.gpt_prompts.overeager_prompt
else:
lazy_prompt = ""
final_reminders.append(self.gpt_prompts.lazy_prompt)
if self.main_model.overeager:
final_reminders.append(self.gpt_prompts.overeager_prompt)
user_lang = self.get_user_language()
if user_lang:
final_reminders.append(f"Reply in {user_lang}.\n")
platform_text = self.get_platform_info()
@@ -1111,10 +1173,12 @@ class Coder:
else:
quad_backtick_reminder = ""
final_reminders = "\n\n".join(final_reminders)
prompt = prompt.format(
fence=self.fence,
quad_backtick_reminder=quad_backtick_reminder,
lazy_prompt=lazy_prompt,
final_reminders=final_reminders,
platform=platform_text,
shell_cmd_prompt=shell_cmd_prompt,
rename_with_shell=rename_with_shell,
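
A self-contained sketch of the new `final_reminders` flow (the flags and template are simplified stand-ins for `self.main_model` and the real system prompt, not the full method):

```python
lazy = False       # stand-in for self.main_model.lazy
overeager = True   # stand-in for self.main_model.overeager
lazy_prompt = "You are diligent and tireless!"
overeager_prompt = "Pay careful attention to the scope of the user's request."

final_reminders = []
if lazy:
    final_reminders.append(lazy_prompt)
if overeager:
    final_reminders.append(overeager_prompt)

user_lang = "English"  # would come from self.get_user_language()
if user_lang:
    final_reminders.append(f"Reply in {user_lang}.\n")

# The joined reminders fill the {final_reminders} placeholder in the prompts.
template = "Act as an expert software developer.\n{final_reminders}"
print(template.format(final_reminders="\n\n".join(final_reminders)))
```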

View File

@@ -15,7 +15,9 @@ You always COMPLETELY IMPLEMENT the needed code!
"""
overeager_prompt = """Pay careful attention to the scope of the user's request.
Do what they ask, but no more."""
Do what they ask, but no more.
Do not improve, comment, fix or modify unrelated parts of the code in any way!
"""
example_messages = []

View File

@@ -412,7 +412,16 @@ def strip_filename(filename, fence):
return
start_fence = fence[0]
if filename.startswith(start_fence) or filename.startswith(triple_backticks):
if filename.startswith(start_fence):
candidate = filename[len(start_fence) :]
if candidate and ("." in candidate or "/" in candidate):
return candidate
return
if filename.startswith(triple_backticks):
candidate = filename[len(triple_backticks) :]
if candidate and ("." in candidate or "/" in candidate):
return candidate
return
filename = filename.rstrip(":")
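
A hypothetical, stripped-down version of the fence check above, to illustrate the new behavior (not the full `strip_filename`):

```python
TRIPLE = "`" * 3  # three backticks

def strip_fence_prefix(filename, start_fence=TRIPLE):
    # Accept a fence-prefixed name only if the remainder looks like a path.
    for prefix in (start_fence, TRIPLE):
        if filename.startswith(prefix):
            candidate = filename[len(prefix):]
            if candidate and ("." in candidate or "/" in candidate):
                return candidate
            return None  # a bare fence is not a filename
    return filename

print(strip_fence_prefix(TRIPLE + "src/app.py"))  # -> src/app.py
print(strip_fence_prefix(TRIPLE))                 # -> None
```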
@@ -456,8 +465,12 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None)
]
# Check if the next line or the one after that is an editblock
next_is_editblock = (i + 1 < len(lines) and head_pattern.match(lines[i + 1].strip())
or i + 2 < len(lines) and head_pattern.match(lines[i + 2].strip()))
next_is_editblock = (
i + 1 < len(lines)
and head_pattern.match(lines[i + 1].strip())
or i + 2 < len(lines)
and head_pattern.match(lines[i + 2].strip())
)
if any(line.strip().startswith(start) for start in shell_starts) and not next_is_editblock:
shell_content = []

View File

@@ -137,7 +137,7 @@ To rename files which have been added to the chat, use shell commands at the end
If the user just says something like "ok" or "go ahead" or "do that" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.
The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.
{lazy_prompt}
{final_reminders}
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
{shell_cmd_reminder}
"""

View File

@@ -1,5 +1,6 @@
# flake8: noqa: E501
from . import shell
from .base_prompts import CoderPrompts
@@ -7,7 +8,7 @@ class EditBlockPrompts(CoderPrompts):
main_system = """Act as an expert software developer.
Always use best practices when coding.
Respect and use existing conventions, libraries, etc that are already present in the code base.
{lazy_prompt}
{final_reminders}
Take requests for changes to the supplied code.
If the request is ambiguous, ask questions.
@@ -28,32 +29,6 @@ You can keep asking if you then decide you need to edit more files.
All changes to files must use this *SEARCH/REPLACE block* format.
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
{shell_cmd_prompt}
"""
shell_cmd_prompt = """
4. *Concisely* suggest any shell commands the user might want to run in ```bash blocks.
Just suggest shell commands this way, not example code.
Only suggest complete shell commands that are ready to execute, without placeholders.
Only suggest at most a few shell commands at a time, not more than 1-3, one per line.
Do not suggest multi-line shell commands.
All shell commands will run from the root directory of the user's project.
Use the appropriate shell based on the user's system info:
{platform}
Examples of when to suggest shell commands:
- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.
- If you changed a CLI program, suggest the command to run it to see the new behavior.
- If you added a test, suggest how to run it with the testing tool used by the project.
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
- If your code changes add new dependencies, suggest the command to install them.
- Etc.
"""
no_shell_cmd_prompt = """
Keep in mind these details about the user's platform and environment:
{platform}
"""
example_messages = [
dict(
@@ -181,7 +156,7 @@ If you want to put code in a new file, use a *SEARCH/REPLACE block* with:
- An empty `SEARCH` section
- The new file's contents in the `REPLACE` section
{rename_with_shell}{go_ahead_tip}{lazy_prompt}ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
{rename_with_shell}{go_ahead_tip}{final_reminders}ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
{shell_cmd_reminder}
"""
@@ -194,14 +169,6 @@ The user will say when they've applied your edits. If they haven't explicitly co
"""
shell_cmd_reminder = """
Examples of when to suggest shell commands:
- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.
- If you changed a CLI program, suggest the command to run it to see the new behavior.
- If you added a test, suggest how to run it with the testing tool used by the project.
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
- If your code changes add new dependencies, suggest the command to install them.
- Etc.
"""
shell_cmd_prompt = shell.shell_cmd_prompt
no_shell_cmd_prompt = shell.no_shell_cmd_prompt
shell_cmd_reminder = shell.shell_cmd_reminder

View File

@@ -5,7 +5,7 @@ from .editblock_prompts import EditBlockPrompts
class EditorEditBlockPrompts(EditBlockPrompts):
main_system = """Act as an expert software developer who edits source code.
{lazy_prompt}
{final_reminders}
Describe each change with a *SEARCH/REPLACE block* per the examples below.
All changes to files must use this *SEARCH/REPLACE block* format.
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!

View File

@@ -5,6 +5,6 @@ from .wholefile_prompts import WholeFilePrompts
class EditorWholeFilePrompts(WholeFilePrompts):
main_system = """Act as an expert software developer and make changes to source code.
{lazy_prompt}
{final_reminders}
Output a copy of each file that needs changes.
"""

View File

@@ -11,7 +11,7 @@ class PatchPrompts(EditBlockPrompts):
main_system = """Act as an expert software developer.
Always use best practices when coding.
Respect and use existing conventions, libraries, etc that are already present in the code base.
{lazy_prompt}
{final_reminders}
Take requests for changes to the supplied code.
If the request is ambiguous, ask questions.
@@ -156,6 +156,6 @@ For `Add` actions, use the `*** Add File: [path/to/new/file]` marker, followed b
For `Delete` actions, use the `*** Delete File: [path/to/file]` marker. No other lines are needed for the deletion.
{rename_with_shell}{go_ahead_tip}{lazy_prompt}ONLY EVER RETURN CODE IN THE SPECIFIED V4A DIFF FORMAT!
{rename_with_shell}{go_ahead_tip}{final_reminders}ONLY EVER RETURN CODE IN THE SPECIFIED V4A DIFF FORMAT!
{shell_cmd_reminder}
"""

aider/coders/shell.py (new file, 37 lines)
View File

@@ -0,0 +1,37 @@
shell_cmd_prompt = """
4. *Concisely* suggest any shell commands the user might want to run in ```bash blocks.
Just suggest shell commands this way, not example code.
Only suggest complete shell commands that are ready to execute, without placeholders.
Only suggest at most a few shell commands at a time, not more than 1-3, one per line.
Do not suggest multi-line shell commands.
All shell commands will run from the root directory of the user's project.
Use the appropriate shell based on the user's system info:
{platform}
Examples of when to suggest shell commands:
- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.
- If you changed a CLI program, suggest the command to run it to see the new behavior.
- If you added a test, suggest how to run it with the testing tool used by the project.
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
- If your code changes add new dependencies, suggest the command to install them.
- Etc.
""" # noqa
no_shell_cmd_prompt = """
Keep in mind these details about the user's platform and environment:
{platform}
""" # noqa
shell_cmd_reminder = """
Examples of when to suggest shell commands:
- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.
- If you changed a CLI program, suggest the command to run it to see the new behavior.
- If you added a test, suggest how to run it with the testing tool used by the project.
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
- If your code changes add new dependencies, suggest the command to install them.
- Etc.
""" # noqa

View File

@@ -348,8 +348,8 @@ def process_fenced_block(lines, start_line_num):
a_fname = block[0][4:].strip()
b_fname = block[1][4:].strip()
# Check if standard git diff prefixes are present and strip them
if a_fname.startswith("a/") and b_fname.startswith("b/"):
# Check if standard git diff prefixes are present (or /dev/null) and strip them
if (a_fname.startswith("a/") or a_fname == "/dev/null") and b_fname.startswith("b/"):
fname = b_fname[2:]
else:
# Otherwise, assume the path is as intended
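
For example, the patched check now resolves a new-file header pair like `--- /dev/null` / `+++ b/new_file.py` to `new_file.py` (a minimal illustration using the same variable names as the hunk above):

```python
a_fname, b_fname = "/dev/null", "b/new_file.py"
if (a_fname.startswith("a/") or a_fname == "/dev/null") and b_fname.startswith("b/"):
    fname = b_fname[2:]  # the b/ prefix is stripped
print(fname)  # -> new_file.py
```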

View File

@@ -1,11 +1,12 @@
# flake8: noqa: E501
from . import shell
from .base_prompts import CoderPrompts
class UnifiedDiffPrompts(CoderPrompts):
main_system = """Act as an expert software developer.
{lazy_prompt}
{final_reminders}
Always use best practices when coding.
Respect and use existing conventions, libraries, etc that are already present in the code base.
@@ -106,5 +107,9 @@ To move code within a file, use 2 hunks: 1 to delete it from its current locatio
To make a new file, show a diff from `--- /dev/null` to `+++ path/to/new/file.ext`.
{lazy_prompt}
{final_reminders}
"""
shell_cmd_prompt = shell.shell_cmd_prompt
no_shell_cmd_prompt = shell.no_shell_cmd_prompt
shell_cmd_reminder = shell.shell_cmd_reminder

View File

@@ -0,0 +1,14 @@
from .udiff_coder import UnifiedDiffCoder
from .udiff_simple_prompts import UnifiedDiffSimplePrompts
class UnifiedDiffSimpleCoder(UnifiedDiffCoder):
"""
A coder that uses unified diff format for code modifications.
This variant uses a simpler prompt that doesn't mention specific
diff rules like using `@@ ... @@` lines or avoiding line numbers.
"""
edit_format = "udiff-simple"
gpt_prompts = UnifiedDiffSimplePrompts()

View File

@@ -0,0 +1,25 @@
from .udiff_prompts import UnifiedDiffPrompts
class UnifiedDiffSimplePrompts(UnifiedDiffPrompts):
"""
Prompts for the UnifiedDiffSimpleCoder.
Inherits from UnifiedDiffPrompts and can override specific prompts
if a simpler wording is desired for this edit format.
"""
example_messages = []
system_reminder = """# File editing rules:
Return edits similar to unified diffs that `diff -U0` would produce.
The user's patch tool needs CORRECT patches that apply cleanly against the current contents of the file!
Think carefully and make sure you include and mark all lines that need to be removed or changed as `-` lines.
Make sure you mark all new or modified lines with `+`.
Don't leave out any lines or the diff patch won't apply correctly.
To make a new file, show a diff from `--- /dev/null` to `+++ path/to/new/file.ext`.
{final_reminders}
""" # noqa

View File

@@ -10,7 +10,7 @@ If the request is ambiguous, ask questions.
Always reply to the user in {language}.
{lazy_prompt}
{final_reminders}
Once you understand the request you MUST:
1. Determine if any code changes are needed.
2. Explain any needed changes.
@@ -61,7 +61,7 @@ To suggest changes to a file you MUST return a *file listing* that contains the
*NEVER* skip, omit or elide content from a *file listing* using "..." or by adding comments like "... rest of code..."!
Create a new file you MUST return a *file listing* which includes an appropriate filename, including any appropriate path.
{lazy_prompt}
{final_reminders}
"""
redacted_edit_message = "No changes are needed."

View File

@@ -595,7 +595,7 @@ class InputOutput:
current_text = buffer.text
# Open the editor with the current text
edited_text = pipe_editor(input_data=current_text)
edited_text = pipe_editor(input_data=current_text, suffix="md")
# Replace the buffer with the edited text, strip any trailing newlines
buffer.text = edited_text.rstrip("\n")

View File

@@ -88,7 +88,7 @@ MODEL_ALIASES = {
"3": "gpt-3.5-turbo",
# Other models
"deepseek": "deepseek/deepseek-chat",
"flash": "gemini/gemini-2.0-flash-exp",
"flash": "gemini/gemini-2.5-flash-preview-04-17",
"quasar": "openrouter/openrouter/quasar-alpha",
"r1": "deepseek/deepseek-reasoner",
"gemini-2.5-pro": "gemini/gemini-2.5-pro-exp-03-25",

View File

@@ -15,22 +15,6 @@
//"supports_tool_choice": true,
"supports_prompt_caching": true
},
"openrouter/deepseek/deepseek-r1": {
"max_tokens": 8192,
"max_input_tokens": 64000,
"max_output_tokens": 8192,
"input_cost_per_token": 0.00000055,
"input_cost_per_token_cache_hit": 0.00000014,
"cache_read_input_token_cost": 0.00000014,
"cache_creation_input_token_cost": 0.0,
"output_cost_per_token": 0.00000219,
"litellm_provider": "openrouter",
"mode": "chat",
//"supports_function_calling": true,
"supports_assistant_prefill": true,
//"supports_tool_choice": true,
"supports_prompt_caching": true
},
"openrouter/deepseek/deepseek-r1:free": {
"max_tokens": 8192,
"max_input_tokens": 64000,
@@ -99,15 +83,6 @@
"output_cost_per_token": 0.000008,
"mode": "chat",
},
"fireworks_ai/accounts/fireworks/models/deepseek-v3": {
"max_tokens": 128000,
"max_input_tokens": 100000,
"max_output_tokens": 8192,
"litellm_provider": "fireworks_ai",
"input_cost_per_token": 0.0000009,
"output_cost_per_token": 0.0000009,
"mode": "chat",
},
"fireworks_ai/accounts/fireworks/models/deepseek-v3-0324": {
"max_tokens": 160000,
"max_input_tokens": 100000,
@@ -117,54 +92,6 @@
"output_cost_per_token": 0.0000009,
"mode": "chat",
},
"o3-mini": {
"max_tokens": 100000,
"max_input_tokens": 200000,
"max_output_tokens": 100000,
"input_cost_per_token": 0.0000011,
"output_cost_per_token": 0.0000044,
"cache_read_input_token_cost": 0.00000055,
"litellm_provider": "openai",
"mode": "chat",
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_vision": true,
"supports_prompt_caching": true,
"supports_system_messages": true,
"supports_response_schema": true
},
"openrouter/openai/o3-mini": {
"max_tokens": 100000,
"max_input_tokens": 200000,
"max_output_tokens": 100000,
"input_cost_per_token": 0.0000011,
"output_cost_per_token": 0.0000044,
"cache_read_input_token_cost": 0.00000055,
"litellm_provider": "openrouter",
"mode": "chat",
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_vision": true,
"supports_prompt_caching": true,
"supports_system_messages": true,
"supports_response_schema": true
},
"openrouter/openai/o3-mini-high": {
"max_tokens": 100000,
"max_input_tokens": 200000,
"max_output_tokens": 100000,
"input_cost_per_token": 0.0000011,
"output_cost_per_token": 0.0000044,
"cache_read_input_token_cost": 0.00000055,
"litellm_provider": "openrouter",
"mode": "chat",
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_vision": true,
"supports_prompt_caching": true,
"supports_system_messages": true,
"supports_response_schema": true
},
"openrouter/openrouter/quasar-alpha": {
"max_input_tokens": 1000000,
"max_output_tokens": 32000,
@@ -203,26 +130,6 @@
"supports_prompt_caching": true,
"supports_system_messages": true
},
"claude-3-7-sonnet-20250219": {
"max_tokens": 8192,
"max_input_tokens": 200000,
"max_output_tokens": 8192,
"input_cost_per_token": 0.000003,
"output_cost_per_token": 0.000015,
"cache_creation_input_token_cost": 0.00000375,
"cache_read_input_token_cost": 0.0000003,
"litellm_provider": "anthropic",
"mode": "chat",
"supports_function_calling": true,
"supports_vision": true,
"tool_use_system_prompt_tokens": 159,
"supports_assistant_prefill": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_response_schema": true,
"deprecation_date": "2025-10-01",
"supports_tool_choice": true
},
"anthropic/claude-3-7-sonnet-20250219": {
"max_tokens": 8192,
"max_input_tokens": 200000,
@@ -243,43 +150,6 @@
"deprecation_date": "2025-10-01",
"supports_tool_choice": true
},
"openrouter/anthropic/claude-3.7-sonnet": {
"max_tokens": 8192,
"max_input_tokens": 200000,
"max_output_tokens": 8192,
"input_cost_per_token": 0.000003,
"output_cost_per_token": 0.000015,
"cache_creation_input_token_cost": 0.00000375,
"cache_read_input_token_cost": 0.0000003,
"litellm_provider": "openrouter",
"mode": "chat",
"supports_function_calling": true,
"supports_vision": true,
"tool_use_system_prompt_tokens": 159,
"supports_assistant_prefill": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_response_schema": true,
"deprecation_date": "2025-10-01",
"supports_tool_choice": true
},
"gpt-4.5-preview": {
"max_tokens": 16384,
"max_input_tokens": 128000,
"max_output_tokens": 16384,
"input_cost_per_token": 0.000075,
"output_cost_per_token": 0.00015,
"cache_read_input_token_cost": 0.0000375,
"litellm_provider": "openai",
"mode": "chat",
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_response_schema": true,
"supports_vision": true,
"supports_prompt_caching": true,
"supports_system_messages": true,
"supports_tool_choice": true
},
"openai/gpt-4.5-preview": {
"max_tokens": 16384,
"max_input_tokens": 128000,
@@ -334,42 +204,6 @@
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"gemini/gemini-2.5-pro-preview-03-25": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
"max_output_tokens": 64000,
"max_images_per_prompt": 3000,
"max_videos_per_prompt": 10,
"max_video_length": 1,
"max_audio_length_hours": 8.4,
"max_audio_per_prompt": 1,
"max_pdf_size_mb": 30,
"input_cost_per_image": 0,
"input_cost_per_video_per_second": 0,
"input_cost_per_audio_per_second": 0,
"input_cost_per_token": 0.00000125,
"input_cost_per_character": 0,
"input_cost_per_token_above_128k_tokens": 0,
"input_cost_per_character_above_128k_tokens": 0,
"input_cost_per_image_above_128k_tokens": 0,
"input_cost_per_video_per_second_above_128k_tokens": 0,
"input_cost_per_audio_per_second_above_128k_tokens": 0,
"output_cost_per_token": 0.000010,
"output_cost_per_character": 0,
"output_cost_per_token_above_128k_tokens": 0,
"output_cost_per_character_above_128k_tokens": 0,
"litellm_provider": "gemini",
"mode": "chat",
"supports_system_messages": true,
"supports_function_calling": true,
"supports_vision": true,
"supports_audio_input": true,
"supports_video_input": true,
"supports_pdf_input": true,
"supports_response_schema": true,
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"vertex_ai/gemini-2.5-pro-exp-03-25": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
@@ -523,15 +357,6 @@
"litellm_provider": "openrouter",
"mode": "chat"
},
"xai/grok-3-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
"max_output_tokens": 131072,
"input_cost_per_token": 0.000003,
"output_cost_per_token": 0.000015,
"litellm_provider": "xai",
"mode": "chat"
},
"openrouter/x-ai/grok-3-mini-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
@@ -541,15 +366,6 @@
"litellm_provider": "openrouter",
"mode": "chat"
},
"xai/grok-3-mini-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
"max_output_tokens": 131072,
"input_cost_per_token": 0.0000003,
"output_cost_per_token": 0.0000005,
"litellm_provider": "xai",
"mode": "chat"
},
"openrouter/x-ai/grok-3-fast-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
@@ -559,15 +375,6 @@
"litellm_provider": "openrouter",
"mode": "chat"
},
"xai/grok-3-fast-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
"max_output_tokens": 131072,
"input_cost_per_token": 0.000005,
"output_cost_per_token": 0.000025,
"litellm_provider": "xai",
"mode": "chat"
},
"openrouter/x-ai/grok-3-mini-fast-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
@@ -577,15 +384,6 @@
"litellm_provider": "openrouter",
"mode": "chat"
},
"xai/grok-3-mini-fast-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
"max_output_tokens": 131072,
"input_cost_per_token": 0.0000006,
"output_cost_per_token": 0.000004,
"litellm_provider": "xai",
"mode": "chat"
},
"openrouter/google/gemini-2.0-flash-exp:free": {
"max_tokens": 8192,
"max_input_tokens": 1048576,

View File

@@ -958,6 +958,7 @@
use_system_prompt: false
- name: gemini/gemini-2.5-pro-preview-03-25
overeager: true
edit_format: diff-fenced
use_repo_map: true
weak_model_name: gemini/gemini-2.0-flash
@@ -965,24 +966,28 @@
- name: gemini/gemini-2.5-pro-exp-03-25
edit_format: diff-fenced
use_repo_map: true
weak_model_name: gemini/gemini-2.0-flash
overeager: true
weak_model_name: gemini/gemini-2.5-flash-preview-04-17
- name: openrouter/google/gemini-2.5-pro-exp-03-25:free
edit_format: diff-fenced
overeager: true
use_repo_map: true
weak_model_name: openrouter/google/gemini-2.0-flash-exp:free
- name: vertex_ai/gemini-2.5-pro-exp-03-25
edit_format: diff-fenced
use_repo_map: true
# Need metadata for this one...
#weak_model_name: vertex_ai/gemini-2.0-flash
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
overeager: true
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
- name: vertex_ai/gemini-2.5-pro-preview-03-25
edit_format: diff-fenced
use_repo_map: true
# Need metadata for this one...
#weak_model_name: vertex_ai/gemini-2.0-flash
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
overeager: true
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
- name: openrouter/openrouter/quasar-alpha
use_repo_map: true
@@ -1367,3 +1372,17 @@
# extra_body:
# reasoning_effort: high
- name: gemini/gemini-2.5-flash-preview-04-17
edit_format: diff
use_repo_map: true
accepts_settings: ["thinking_tokens"]
- name: gemini-2.5-flash-preview-04-17
edit_format: diff
use_repo_map: true
accepts_settings: ["thinking_tokens"]
- name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
edit_format: diff
use_repo_map: true
accepts_settings: ["thinking_tokens"]

View File

@@ -34,6 +34,8 @@ def load_gitignores(gitignore_paths: list[Path]) -> Optional[PathSpec]:
"__pycache__/", # Python cache dir
".DS_Store", # macOS metadata
"Thumbs.db", # Windows thumbnail cache
"*.svg",
"*.pdf",
# IDE files
".idea/", # JetBrains IDEs
".vscode/", # VS Code
@@ -64,7 +66,9 @@ class FileWatcher:
"""Watches source files for changes and AI comments"""
# Compiled regex pattern for AI comments
ai_comment_pattern = re.compile(r"(?:#|//|--|;+) *(ai\b.*|ai\b.*|.*\bai[?!]?) *$", re.IGNORECASE)
ai_comment_pattern = re.compile(
r"(?:#|//|--|;+) *(ai\b.*|ai\b.*|.*\bai[?!]?) *$", re.IGNORECASE
)
def __init__(self, coder, gitignores=None, verbose=False, analytics=None, root=None):
self.coder = coder
@@ -93,15 +97,19 @@ class FileWatcher:
rel_path = path_abs.relative_to(self.root)
if self.verbose:
dump(rel_path)
print("Changed", rel_path)
if self.gitignore_spec and self.gitignore_spec.match_file(
rel_path.as_posix() + ("/" if path_abs.is_dir() else "")
):
return False
# Check file size before reading content
if path_abs.is_file() and path_abs.stat().st_size > 1 * 1024 * 1024: # 1MB limit
return False
if self.verbose:
dump("ok", rel_path)
print("Checking", rel_path)
# Check if file contains AI markers
try:
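
A self-contained sketch of the new size guard (same 1MB threshold as the check above; `small_enough_to_scan` is a hypothetical helper, not aider's API):

```python
from pathlib import Path

MAX_BYTES = 1 * 1024 * 1024  # 1MB limit, matching the check above

def small_enough_to_scan(path: Path) -> bool:
    # Only regular files under the limit are scanned for AI comments.
    return path.is_file() and path.stat().st_size <= MAX_BYTES
```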

View File

@@ -24,7 +24,28 @@ cog.out(text)
]]]-->
### main branch
- Add support for `gemini-2.5-flash-preview-04-17` models.
- Improved robustness of edit block parsing when filenames start with backticks or fences.
- Add new `udiff-simple` edit format, for Gemini 2.5 Pro.
- Update default weak/editor models for Gemini 2.5 Pro models to use `gemini-2.5-flash-preview-04-17`.
- Instruct models to reply in the user's detected system language.
- Fix parsing of diffs for newly created files (`--- /dev/null`).
- Add markdown syntax highlighting support when editing multi-line commit messages via `/commit`, by Kay Gosho.
- Set Gemini 2.5 Pro models to use the `overeager` prompt setting by default.
- Add common file types (`.svg`, `.pdf`) and IDE directories (`.idea/`, `.vscode/`, etc.) to the default list of ignored files for AI comment scanning (`--watch`).
- Skip scanning files larger than 1MB for AI comments (`--watch`).
- Aider wrote 67% of the code in this release.
### Aider v0.82.2
- Fix editing shell files with diff-fenced, by zjy1412.
- Improve robustness of patch application by allowing multiple update/delete actions for the same file within a single response.
- Update prompts to instruct LLMs to consolidate all edits for a given file into a single block within the patch.
### Aider v0.82.1
- Added support for `o3` and `o4-mini` including provider-specific versions for OpenAI, OpenRouter, and Azure.
- Added support for Azure specific `gpt-4.1` and `gpt-4.1-mini` models.
- Disabled streaming for `o3` models since you need identity verification to stream.

View File

@@ -1197,4 +1197,30 @@
date: 2025-04-19
versions: 0.82.2.dev
seconds_per_case: 195.6
total_cost: 0.0000
total_cost: 0.0000
- dirname: 2025-04-20-19-54-31--flash25-diff-no-think
test_cases: 225
model: gemini-2.5-flash-preview-04-17 (default)
edit_format: diff
commit_hash: 7fcce5d-dirty
pass_rate_1: 21.8
pass_rate_2: 47.1
pass_num_1: 49
pass_num_2: 106
percent_cases_well_formed: 85.3
error_outputs: 60
num_malformed_responses: 55
num_with_malformed_responses: 33
user_asks: 82
lazy_comments: 1
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 5
test_timeouts: 4
total_tests: 225
command: aider --model gemini/gemini-2.5-flash-preview-04-17
date: 2025-04-20
versions: 0.82.3.dev
seconds_per_case: 50.1
total_cost: 1.8451

View File

@@ -27,7 +27,7 @@ document.addEventListener('DOMContentLoaded', function () {
labels: labels,
datasets: [{
label: 'Aider\'s percent of new code by release',
data: [{% for row in site.data.blame %}{ x: '{{ row.end_tag }}', y: {{ row.aider_percentage }}, lines: {{ row.aider_total }} },{% endfor %}],
data: [{% for row in site.data.blame %}{ x: '{{ row.end_tag }}', y: {{ row.aider_percentage }}, lines: {{ row.aider_total }}, end_date: '{{ row.end_date }}' },{% endfor %}],
backgroundColor: 'rgba(54, 162, 235, 0.8)',
borderColor: 'rgba(54, 162, 235, 1)',
borderWidth: 1
@@ -88,6 +88,10 @@ document.addEventListener('DOMContentLoaded', function () {
var value = context.parsed.y || 0;
var lines = context.raw.lines || 0;
return `${label}: ${Math.round(value)}% (${lines} lines)`;
},
afterLabel: function(context) {
let date = context.raw.end_date || 'n/a';
return `Date: ` + date;
}
}
},

View File

@@ -1,10 +1,13 @@
If you already have python 3.8-3.13 installed, you can get started quickly like this:
If you already have python 3.8-3.13 installed, you can get started quickly like this.
First, install aider:
{% include install.md %}
Start working with aider on your codebase:
```bash
python -m pip install aider-install
aider-install
# Change directory into your codebase
cd /to/your/project

View File

@@ -0,0 +1,5 @@
```bash
python -m pip install aider-install
aider-install
```

View File

@@ -446,7 +446,7 @@ code, pre, .code-block {
}
.testimonial-text::before {
content: "\201C"; /* Opening fancy quote */
content: "\201C\00A0"; /* Opening fancy quote */
color: var(--primary);
margin-right: 4px;
vertical-align: -0.3em;

File diff suppressed because it is too large.

View File

@@ -117,40 +117,6 @@ For example:
These settings will be merged with any model-specific settings, with the
`aider/extra_params` settings taking precedence for any direct conflicts.
### Controlling o1 reasoning effort
You need this chunk of yaml:
```
extra_params:
extra_body:
reasoning_effort: high
```
This is a full entry for o1 with that setting, obtained by finding the default
entry in the list below and adding the above `extra_params` entry:
```
- name: o1
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
send_undo_reply: false
lazy: false
reminder: user
examples_as_sys_msg: false
cache_control: false
caches_by_default: false
use_system_prompt: true
use_temperature: false
streaming: false
editor_model_name: gpt-4o
editor_edit_format: editor-diff
extra_params:
extra_body:
reasoning_effort: high
```
### Default model settings
Below are all the pre-configured model settings to give a sense for the settings which are supported.
@@ -674,6 +640,12 @@ cog.out("```\n")
editor_edit_format: editor-diff
reasoning_tag: think
- name: gemini-2.5-flash-preview-04-17
edit_format: diff
use_repo_map: true
accepts_settings:
- thinking_tokens
- name: gemini/gemini-1.5-flash-002
- name: gemini/gemini-1.5-flash-exp-0827
@@ -702,15 +674,23 @@ cog.out("```\n")
edit_format: diff
use_repo_map: true
- name: gemini/gemini-2.5-flash-preview-04-17
edit_format: diff
use_repo_map: true
accepts_settings:
- thinking_tokens
- name: gemini/gemini-2.5-pro-exp-03-25
edit_format: diff-fenced
weak_model_name: gemini/gemini-2.0-flash
weak_model_name: gemini/gemini-2.5-flash-preview-04-17
use_repo_map: true
overeager: true
- name: gemini/gemini-2.5-pro-preview-03-25
edit_format: diff-fenced
weak_model_name: gemini/gemini-2.0-flash
use_repo_map: true
overeager: true
- name: gemini/gemini-exp-1114
edit_format: diff
@@ -1240,6 +1220,7 @@ cog.out("```\n")
edit_format: diff-fenced
weak_model_name: openrouter/google/gemini-2.0-flash-exp:free
use_repo_map: true
overeager: true
- name: openrouter/google/gemma-3-27b-it
use_system_prompt: false
@@ -1446,6 +1427,12 @@ cog.out("```\n")
accepts_settings:
- thinking_tokens
- name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
edit_format: diff
use_repo_map: true
accepts_settings:
- thinking_tokens
- name: vertex_ai/claude-3-5-haiku@20241022
edit_format: diff
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
@@ -1496,11 +1483,17 @@ cog.out("```\n")
- name: vertex_ai/gemini-2.5-pro-exp-03-25
edit_format: diff-fenced
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
use_repo_map: true
overeager: true
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
- name: vertex_ai/gemini-2.5-pro-preview-03-25
edit_format: diff-fenced
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
use_repo_map: true
overeager: true
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
- name: vertex_ai/gemini-pro-experimental
edit_format: diff-fenced

View File

@@ -79,7 +79,7 @@ for alias, model in sorted(MODEL_ALIASES.items()):
- `4-turbo`: gpt-4-1106-preview
- `4o`: gpt-4o
- `deepseek`: deepseek/deepseek-chat
- `flash`: gemini/gemini-2.0-flash-exp
- `flash`: gemini/gemini-2.5-flash-preview-04-17
- `gemini`: gemini/gemini-2.5-pro-preview-03-25
- `gemini-2.5-pro`: gemini/gemini-2.5-pro-exp-03-25
- `gemini-exp`: gemini/gemini-2.5-pro-exp-03-25

View File

@@ -264,14 +264,14 @@ tr:hover { background-color: #f5f5f5; }
</style>
<table>
<tr><th>Model Name</th><th class='right'>Total Tokens</th><th class='right'>Percent</th></tr>
<tr><td>gemini/gemini-2.5-pro-exp-03-25</td><td class='right'>2,499,338</td><td class='right'>83.9%</td></tr>
<tr><td>openrouter/anthropic/claude-3.7-sonnet</td><td class='right'>313,377</td><td class='right'>10.5%</td></tr>
<tr><td>o3</td><td class='right'>100,777</td><td class='right'>3.4%</td></tr>
<tr><td>gemini/gemini-2.5-pro-preview-03-25</td><td class='right'>16,524</td><td class='right'>0.6%</td></tr>
<tr><td>o4-mini</td><td class='right'>16,499</td><td class='right'>0.6%</td></tr>
<tr><td>gpt-4.1-mini</td><td class='right'>11,775</td><td class='right'>0.4%</td></tr>
<tr><td>gpt-4.1</td><td class='right'>10,687</td><td class='right'>0.4%</td></tr>
<tr><td>None</td><td class='right'>8,001</td><td class='right'>0.3%</td></tr>
<tr><td>gemini/gemini-2.5-pro-exp-03-25</td><td class='right'>1,826,168</td><td class='right'>83.3%</td></tr>
<tr><td>o3</td><td class='right'>239,619</td><td class='right'>10.9%</td></tr>
<tr><td>openrouter/anthropic/claude-3.7-sonnet</td><td class='right'>56,318</td><td class='right'>2.6%</td></tr>
<tr><td>gemini/gemini-2.5-flash-preview-04-17</td><td class='right'>18,645</td><td class='right'>0.9%</td></tr>
<tr><td>gemini/gemini-2.5-pro-preview-03-25</td><td class='right'>16,524</td><td class='right'>0.8%</td></tr>
<tr><td>o4-mini</td><td class='right'>16,499</td><td class='right'>0.8%</td></tr>
<tr><td>xai/grok-3-fast-beta</td><td class='right'>10,288</td><td class='right'>0.5%</td></tr>
<tr><td>None</td><td class='right'>8,001</td><td class='right'>0.4%</td></tr>
<tr><td>gemini/REDACTED</td><td class='right'>606</td><td class='right'>0.0%</td></tr>
</table>

View File

@@ -16,9 +16,10 @@ description: Aider can connect to most LLMs for AI pair programming.
Aider works best with these models, which are skilled at editing code:
- [Gemini 2.5 Pro](/docs/llms/gemini.html)
- [DeepSeek R1 and V3](/docs/llms/deepseek.html)
- [Claude 3.7 Sonnet](/docs/llms/anthropic.html)
- [OpenAI o1, o3-mini and GPT-4o](/docs/llms/openai.html)
- [OpenAI o3, o4-mini and GPT-4.1](/docs/llms/openai.html)
## Free models
@@ -26,10 +27,8 @@ Aider works best with these models, which are skilled at editing code:
Aider works with a number of **free** API providers:
- Google's [Gemini 1.5 Pro](/docs/llms/gemini.html) works with aider, with
code editing capabilities similar to GPT-3.5.
- You can use [Llama 3 70B on Groq](/docs/llms/groq.html) which is comparable to GPT-3.5 in code editing performance.
- Cohere also offers free API access to their [Command-R+ model](/docs/llms/cohere.html), which works with aider as a *very basic* coding assistant.
- [OpenRouter offers free access to many models](https://openrouter.ai/models/?q=free), with limitations on daily usage.
- Google's [Gemini 2.5 Pro Exp](/docs/llms/gemini.html) works very well with aider.
## Local models
{: .no_toc }

View File

@@ -10,21 +10,26 @@ To work with Anthropic's models, you need to provide your
either in the `ANTHROPIC_API_KEY` environment variable or
via the `--anthropic-api-key` command line switch.
Aider has some built in shortcuts for the most popular Anthropic models and
has been tested and benchmarked to work well with them:
First, install aider:
{% include install.md %}
Then configure your API keys:
```
python -m pip install -U aider-chat
export ANTHROPIC_API_KEY=<key> # Mac/Linux
setx ANTHROPIC_API_KEY <key> # Windows, restart shell after setx
```
Start working with aider and Anthropic on your codebase:
```bash
# Change directory into your codebase
cd /to/your/project
# Aider uses Claude 3.7 Sonnet by default
aider
# Claude 3 Opus
aider --model claude-3-opus-20240229
# List models available from Anthropic
aider --list-models anthropic/
```

View File

@@ -7,9 +7,13 @@ nav_order: 500
Aider can connect to the OpenAI models on Azure.
```
python -m pip install -U aider-chat
First, install aider:
{% include install.md %}
Then configure your API keys and endpoint:
```
# Mac/Linux:
export AZURE_API_KEY=<key>
export AZURE_API_VERSION=2024-12-01-preview
@@ -20,6 +24,13 @@ setx AZURE_API_KEY <key>
setx AZURE_API_VERSION 2024-12-01-preview
setx AZURE_API_BASE https://myendpt.openai.azure.com
# ... restart your shell after setx commands
```
Start working with aider and Azure on your codebase:
```bash
# Change directory into your codebase
cd /to/your/project
aider --model azure/<your_model_deployment_name>

View File

@@ -6,8 +6,6 @@ nav_order: 560
# Amazon Bedrock
Aider can connect to models provided by Amazon Bedrock.
You will need to have an AWS account with access to the Bedrock service.
To configure Aider to use the Amazon Bedrock API, you need to set up your AWS credentials.
This can be done using the AWS CLI or by setting environment variables.
@@ -37,6 +35,14 @@ feature, you will receive an error message like the following:
anthropic.claude-3-7-sonnet-20250219-v1:0 with on-demand throughput isn\xe2\x80\x99t supported. Retry your
request with the ID or ARN of an inference profile that contains this model."}'
## Installation and Configuration
First, install aider:
{% include install.md %}
Next, configure your AWS credentials. This can be done using the AWS CLI or by setting environment variables.
## AWS CLI Configuration
If you haven't already, install the [AWS CLI](https://aws.amazon.com/cli/) and configure it with your credentials:
@@ -49,7 +55,7 @@ This will prompt you to enter your AWS Access Key ID, Secret Access Key, and def
## Environment Variables
Alternatively, you can set the following environment variables:
You can set the following environment variables:
```bash
export AWS_REGION=your_preferred_region
@@ -75,32 +81,15 @@ $env:AWS_SECRET_ACCESS_KEY = 'your_secret_key'
$env:AWS_REGION = 'us-west-2' # Put whichever AWS region that you'd like, that the Bedrock service supports.
```
## Install boto3
The AWS Bedrock provider requires the `boto3` package in order to function correctly:
```bash
pip install boto3
```
To use aider installed via `pipx` with AWS Bedrock, you must add the `boto3` dependency to aider's virtual environment by running
```bash
pipx inject aider-chat boto3
```
You must install `boto3` dependency to aider's virtual environment installed via one-liner or uv by running
```bash
uv tool run --from aider-chat pip install boto3
```
## Running Aider with Bedrock
## Get Started
Once your AWS credentials are set up, you can run Aider with the `--model` command line switch, specifying the Bedrock model you want to use:
```bash
# Change directory into your codebase
cd /to/your/project
aider --model bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0
```
@@ -121,6 +110,20 @@ aider --list-models bedrock/
Make sure you have access to these models in your AWS account before attempting to use them with Aider.
## Install boto3
You may need to install the `boto3` package.
```bash
# If you installed with aider-install or `uv tool`
uv tool run --from aider-chat pip install boto3
# Or with pipx...
pipx inject aider-chat boto3
# Or with pip
pip install -U boto3
```
# More info
For more information on Amazon Bedrock and its models, refer to the [official AWS documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/what-is-bedrock.html).

View File

@@ -10,13 +10,22 @@ Their Command-R+ model works well with aider
as a *very basic* coding assistant.
You'll need a [Cohere API key](https://dashboard.cohere.com/welcome/login).
To use **Command-R+**:
First, install aider:
{% include install.md %}
Then configure your API keys:
```
python -m pip install -U aider-chat
export COHERE_API_KEY=<key> # Mac/Linux
setx COHERE_API_KEY <key> # Windows, restart shell after setx
```
Start working with aider and Cohere on your codebase:
```bash
# Change directory into your codebase
cd /to/your/project
aider --model command-r-plus-08-2024

View File

@@ -9,11 +9,22 @@ Aider can connect to the DeepSeek.com API.
To work with DeepSeek's models, you need to set the `DEEPSEEK_API_KEY` environment variable with your [DeepSeek API key](https://platform.deepseek.com/api_keys).
The DeepSeek Chat V3 model has a top score on aider's code editing benchmark.
```
python -m pip install -U aider-chat
First, install aider:
{% include install.md %}
Then configure your API keys:
```
export DEEPSEEK_API_KEY=<key> # Mac/Linux
setx DEEPSEEK_API_KEY <key> # Windows, restart shell after setx
```
Start working with aider and DeepSeek on your codebase:
```bash
# Change directory into your codebase
cd /to/your/project
# Use DeepSeek Chat v3
aider --model deepseek/deepseek-chat

View File

@@ -7,22 +7,43 @@ nav_order: 300
You'll need a [Gemini API key](https://aistudio.google.com/app/u/2/apikey).
```
python -m pip install -U aider-chat
First, install aider:
# You may need to install google-generativeai
pip install -U google-generativeai
{% include install.md %}
# Or with pipx...
pipx inject aider-chat google-generativeai
Then configure your API keys:
```bash
export GEMINI_API_KEY=<key> # Mac/Linux
setx GEMINI_API_KEY <key> # Windows, restart shell after setx
```
# You can run the Gemini 2.5 Pro model with:
aider --model gemini-2.5-pro
Start working with aider and Gemini on your codebase:
```bash
# Change directory into your codebase
cd /to/your/project
# You can run the Gemini 2.5 Pro model with this shortcut:
aider --model gemini
# You can run the Gemini 2.5 Pro Exp for free, with usage limits:
aider --model gemini-exp
# List models available from Gemini
aider --list-models gemini/
```
You may need to install the `google-generativeai` package.
```bash
# If you installed with aider-install or `uv tool`
uv tool run --from aider-chat pip install google-generativeai
# Or with pipx...
pipx inject aider-chat google-generativeai
# Or with pip
pip install -U google-generativeai
```

View File

@@ -10,13 +10,22 @@ The Llama 3 70B model works
well with aider and is comparable to GPT-3.5 in code editing performance.
You'll need a [Groq API key](https://console.groq.com/keys).
To use **Llama3 70B**:
First, install aider:
{% include install.md %}
Then configure your API keys:
```
python -m pip install -U aider-chat
export GROQ_API_KEY=<key> # Mac/Linux
setx GROQ_API_KEY <key> # Windows, restart shell after setx
```
Start working with aider and Groq on your codebase:
```bash
# Change directory into your codebase
cd /to/your/project
aider --model groq/llama3-70b-8192

View File

@@ -5,11 +5,15 @@ nav_order: 400
# LM Studio
To use LM Studio:
Aider can connect to models served by LM Studio.
First, install aider:
{% include install.md %}
Then configure your API key and endpoint:
```
python -m pip install -U aider-chat
# Must set a value here even if it's a dummy value
export LM_STUDIO_API_KEY=dummy-api-key # Mac/Linux
setx LM_STUDIO_API_KEY dummy-api-key # Windows, restart shell after setx
@@ -17,12 +21,19 @@ setx LM_STUDIO_API_KEY dummy-api-key # Windows, restart shell after setx
# LM Studio default server URL is http://localhost:1234/v1
export LM_STUDIO_API_BASE=http://localhost:1234/v1 # Mac/Linux
setx LM_STUDIO_API_BASE http://localhost:1234/v1 # Windows, restart shell after setx
aider --model lm_studio/<your-model-name>
```
**Note:** Even though LM Studio doesn't require an API key out of the box, `LM_STUDIO_API_KEY` must be set to a dummy value like `dummy-api-key`; otherwise the client request will fail when it tries to send an empty `Bearer` token.
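To sanity-check the endpoint before starting aider, you can hit the models route with the dummy key (a sketch assuming the default server URL):
```bash
# Should return a JSON list of loaded models while LM Studio's server is running
curl http://localhost:1234/v1/models \
  -H "Authorization: Bearer dummy-api-key"
```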
Start working with aider and LM Studio on your codebase:
```bash
# Change directory into your codebase
cd /to/your/project
aider --model lm_studio/<your-model-name>
```
See the [model warnings](warnings.html)
section for information on warnings which will occur
when working with models that aider is not familiar with.

View File

@@ -7,6 +7,19 @@ nav_order: 500
Aider can connect to local Ollama models.
First, install aider:
{% include install.md %}
Then configure your Ollama API endpoint (usually the default):
```bash
export OLLAMA_API_BASE=http://127.0.0.1:11434 # Mac/Linux
setx OLLAMA_API_BASE http://127.0.0.1:11434 # Windows, restart shell after setx
```
Start working with aider and Ollama on your codebase:
```
# Pull the model
ollama pull <model>
@@ -14,11 +27,8 @@ ollama pull <model>
# Start your ollama server, increasing the context window to 8k tokens
OLLAMA_CONTEXT_LENGTH=8192 ollama serve
# In another terminal window...
python -m pip install -U aider-chat
export OLLAMA_API_BASE=http://127.0.0.1:11434 # Mac/Linux
setx OLLAMA_API_BASE http://127.0.0.1:11434 # Windows, restart shell after setx
# In another terminal window, change directory into your codebase
cd /to/your/project
aider --model ollama_chat/<model>
```

View File

@@ -7,10 +7,13 @@ nav_order: 500
Aider can connect to any LLM which is accessible via an OpenAI compatible API endpoint.
```
python -m pip install aider-install
aider-install
First, install aider:
{% include install.md %}
Then configure your API key and endpoint:
```
# Mac/Linux:
export OPENAI_API_BASE=<endpoint>
export OPENAI_API_KEY=<key>
@@ -19,6 +22,13 @@ export OPENAI_API_KEY=<key>
setx OPENAI_API_BASE <endpoint>
setx OPENAI_API_KEY <key>
# ... restart shell after setx commands
```
Start working with aider and your OpenAI compatible API on your codebase:
```bash
# Change directory into your codebase
cd /to/your/project
# Prefix the model name with openai/
aider --model openai/<model-name>

View File

@@ -10,27 +10,34 @@ To work with OpenAI's models, you need to provide your
either in the `OPENAI_API_KEY` environment variable or
via the `--api-key openai=<key>` command line switch.
Aider has some built-in shortcuts for the most popular OpenAI models and
has been tested and benchmarked to work well with them:
First, install aider:
{% include install.md %}
Then configure your API keys:
```
python -m pip install -U aider-chat
export OPENAI_API_KEY=<key> # Mac/Linux
setx OPENAI_API_KEY <key> # Windows, restart shell after setx
```
Start working with aider and OpenAI on your codebase:
```bash
# Change directory into your codebase
cd /to/your/project
# o3-mini
aider --model o3-mini --api-key openai=<key>
aider --model o3-mini
# o1-mini
aider --model o1-mini --api-key openai=<key>
aider --model o1-mini
# GPT-4o
aider --model gpt-4o --api-key openai=<key>
aider --model gpt-4o
# List models available from OpenAI
aider --list-models openai/
# You can also store your API key in environment variables (or .env)
export OPENAI_API_KEY=<key> # Mac/Linux
setx OPENAI_API_KEY <key> # Windows, restart shell after setx
```
You can run `aider --model <model-name>` to use any other OpenAI model.

View File

@@ -8,11 +8,22 @@ nav_order: 500
Aider can connect to [models provided by OpenRouter](https://openrouter.ai/models?o=top-weekly):
You'll need an [OpenRouter API key](https://openrouter.ai/keys).
```
python -m pip install -U aider-chat
First, install aider:
{% include install.md %}
Then configure your API keys:
```
export OPENROUTER_API_KEY=<key> # Mac/Linux
setx OPENROUTER_API_KEY <key> # Windows, restart shell after setx
```
Start working with aider and OpenRouter on your codebase:
```bash
# Change directory into your codebase
cd /to/your/project
# Or any other OpenRouter model
aider --model openrouter/<provider>/<model>
@@ -23,16 +34,6 @@ aider --list-models openrouter/
In particular, many aider users access Sonnet via OpenRouter:
```
python -m pip install -U aider-chat
export OPENROUTER_API_KEY=<key> # Mac/Linux
setx OPENROUTER_API_KEY <key> # Windows, restart shell after setx
aider --model openrouter/anthropic/claude-3.7-sonnet
```
{: .tip }
If you get errors, check your
[OpenRouter privacy settings](https://openrouter.ai/settings/privacy).

View File

@@ -55,8 +55,8 @@ lines = run(
lines = ['- ' + line for line in lines.splitlines(keepends=True)]
cog.out(''.join(lines))
]]]-->
- ALEPHALPHA_API_KEY
- ALEPH_ALPHA_API_KEY
- ALEPHALPHA_API_KEY
- ANTHROPIC_API_KEY
- ANYSCALE_API_KEY
- AZURE_AI_API_KEY
@@ -66,15 +66,15 @@ cog.out(''.join(lines))
- CEREBRAS_API_KEY
- CLARIFAI_API_KEY
- CLOUDFLARE_API_KEY
- CO_API_KEY
- CODESTRAL_API_KEY
- COHERE_API_KEY
- CO_API_KEY
- DATABRICKS_API_KEY
- DEEPINFRA_API_KEY
- DEEPSEEK_API_KEY
- FIREWORKSAI_API_KEY
- FIREWORKS_AI_API_KEY
- FIREWORKS_API_KEY
- FIREWORKSAI_API_KEY
- GEMINI_API_KEY
- GROQ_API_KEY
- HUGGINGFACE_API_KEY

View File

@@ -13,6 +13,10 @@ or service account with permission to use the Vertex AI API.
With your chosen login method, the gcloud CLI should automatically set the
`GOOGLE_APPLICATION_CREDENTIALS` environment variable which points to the credentials file.
First, install aider:
{% include install.md %}
To configure Aider to use the Vertex AI API, you need to set `VERTEXAI_PROJECT` (the GCP project ID)
and `VERTEXAI_LOCATION` (the GCP region) [environment variables for Aider](/docs/config/dotenv.html).
@@ -27,9 +31,12 @@ VERTEXAI_PROJECT=my-project
VERTEXAI_LOCATION=us-east5
```
Then you can run aider with the `--model` command line switch, like this:
Start working with aider and Vertex AI on your codebase:
```
# Change directory into your codebase
cd /to/your/project
aider --model vertex_ai/claude-3-5-sonnet@20240620
```

View File

@@ -7,14 +7,22 @@ nav_order: 400
You'll need an [xAI API key](https://console.x.ai).
To use xAI:
First, install aider:
{% include install.md %}
Then configure your API keys:
```bash
python -m pip install aider-install
aider-install
export XAI_API_KEY=<key> # Mac/Linux
setx XAI_API_KEY <key> # Windows, restart shell after setx
```
Start working with aider and xAI on your codebase:
```bash
# Change directory into your codebase
cd /to/your/project
# Grok 3
aider --model xai/grok-3-beta

View File

@@ -17,6 +17,8 @@ First, aider will check which
[keys you have provided via the environment, config files, or command line arguments](https://aider.chat/docs/config/api-keys.html).
Based on the available keys, aider will select the best model to use.
## OpenRouter
If you have not provided any keys, aider will offer to help you connect to
[OpenRouter](http://openrouter.ai)
which provides both free and paid access to most popular LLMs.

View File

@@ -52,8 +52,8 @@ the script as your linter.
# Second attempt will not do anything and exit 0 unless there's a real problem beyond
# the code formatting that was completed.
pre-commit run --files $* >/dev/null \
|| pre-commit run --files $*
pre-commit run --files "$@" >/dev/null \
|| pre-commit run --files "$@"
```
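The quoting matters whenever a staged file path contains spaces: unquoted `$*` re-splits such a path into separate words, while `"$@"` preserves each argument intact. A minimal illustration with a hypothetical filename:
```bash
set -- "my file.py"       # pretend this is the script's argument list
printf '%s\n' $*          # word-splits: prints "my" and "file.py" on two lines
printf '%s\n' "$@"        # preserves arguments: prints "my file.py" on one line
```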
## Testing

View File

@@ -73,7 +73,7 @@ cog.out(text)
</a>
<a href="https://pypi.org/project/aider-chat/" class="github-badge badge-installs" title="Total number of installations via pip from PyPI">
<span class="badge-label">📦 Installs</span>
<span class="badge-value">2.0M</span>
<span class="badge-value">2.1M</span>
</a>
<div class="github-badge badge-tokens" title="Number of tokens processed weekly by Aider users">
<span class="badge-label">📈 Tokens/week</span>
@@ -269,7 +269,7 @@ cog.out(text)
<script>
const testimonials = [
{
text: "My life has changed this week. There's finally an AI coding tool that's good enough to keep up with me... Aider... It's going to rock your world.",
text: "My life has changed... There's finally an AI coding tool that's good enough to keep up with me... Aider... It's going to rock your world.",
author: "Eric S. Raymond",
link: "https://x.com/esrtweet/status/1910809356381413593"
},
@@ -409,7 +409,7 @@ const testimonials = [
link: "https://x.com/ccui42/status/1904965344999145698"
},
{
text: "Aider is the precision tool of LLM code gen. It is minimal, thoughtful and capable of surgical changes to your codebase all while keeping the developer in control.",
text: "Aider is the precision tool of LLM code gen... Minimal, thoughtful and capable of surgical changes to your codebase all while keeping the developer in control.",
author: "Reilly Sweetland",
link: "https://x.com/rsweetland/status/1904963807237259586"
},

View File

@@ -209,6 +209,9 @@ def main(
reasoning_effort: Optional[str] = typer.Option(
None, "--reasoning-effort", help="Set reasoning effort for models that support it"
),
thinking_tokens: Optional[int] = typer.Option(
None, "--thinking-tokens", help="Set thinking tokens for models that support it"
),
exercises_dir: str = typer.Option(
EXERCISES_DIR_DEFAULT, "--exercises-dir", help="Directory with exercise files"
),
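With the new option wired through, a benchmark run can pin a thinking budget. A hypothetical invocation (the run name and model are placeholders; the flag takes an integer token count):
```bash
./benchmark/benchmark.py my-thinking-run --model sonnet --thinking-tokens 32000
```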
@@ -366,6 +369,7 @@ def main(
num_ctx,
sleep,
reasoning_effort,
thinking_tokens,
)
all_results.append(results)
@@ -391,6 +395,7 @@ def main(
num_ctx,
sleep,
reasoning_effort,
thinking_tokens,
)
all_results = run_test_threaded.gather(tqdm=True)
@@ -489,6 +494,7 @@ def summarize_results(dirname, stats_languages=None):
res.lazy_comments = 0
res.reasoning_effort = None
res.thinking_tokens = None
variants = defaultdict(set)
for results in all_results:
@@ -518,6 +524,7 @@ def summarize_results(dirname, stats_languages=None):
res.indentation_errors += results.get("indentation_errors", 0)
res.reasoning_effort = results.get("reasoning_effort")
res.thinking_tokens = results.get("thinking_tokens")
for key in "model edit_format commit_hash editor_model editor_edit_format".split():
val = results.get(key)
@@ -564,6 +571,8 @@ def summarize_results(dirname, stats_languages=None):
if res.reasoning_effort is not None:
print(f" reasoning_effort: {res.reasoning_effort}")
if res.thinking_tokens is not None:
print(f" thinking_tokens: {res.thinking_tokens}")
for i in range(tries):
print(f" pass_rate_{i + 1}: {percents[i]:.1f}")
@@ -650,15 +659,14 @@ def get_replayed_content(replay_dname, test_dname):
def run_test(original_dname, testdir, *args, **kwargs):
try:
return run_test_real(original_dname, testdir, *args, **kwargs)
except Exception as err:
except Exception:
print("=" * 40)
print("Test failed")
print(err)
traceback.print_exc()
testdir = Path(testdir)
results_fname = testdir / ".aider.results.json"
results_fname.write_text(json.dumps(dict(exception=str(err))))
results_fname.write_text(json.dumps(dict(exception=traceback.format_exc())))
def run_test_real(
@@ -677,6 +685,7 @@ def run_test_real(
num_ctx=None,
sleep=0,
reasoning_effort: Optional[str] = None,
thinking_tokens: Optional[int] = None,
read_model_settings=None,
):
if not os.path.isdir(testdir):
@@ -787,6 +796,9 @@ def run_test_real(
if reasoning_effort is not None:
main_model.set_reasoning_effort(reasoning_effort)
if thinking_tokens is not None:
main_model.set_thinking_tokens(thinking_tokens)
dump(main_model.max_chat_history_tokens)
if num_ctx:
@@ -938,6 +950,7 @@ def run_test_real(
indentation_errors=indentation_errors,
lazy_comments=lazy_comments, # Add the count of pattern matches to the results
reasoning_effort=reasoning_effort,
thinking_tokens=thinking_tokens,
chat_hashes=list(
zip(
coder.chat_completion_call_hashes,

View File

@@ -2,8 +2,8 @@
docker run \
-it --rm \
--memory=25g \
--memory-swap=25g \
--memory=12g \
--memory-swap=12g \
--add-host=host.docker.internal:host-gateway \
-v `pwd`:/aider \
-v `pwd`/tmp.benchmarks/.:/benchmarks \

View File

@@ -4,7 +4,7 @@ aiohappyeyeballs==2.6.1
# via
# -c requirements/common-constraints.txt
# aiohttp
aiohttp==3.11.16
aiohttp==3.11.18
# via
# -c requirements/common-constraints.txt
# litellm
@@ -33,11 +33,15 @@ backoff==2.2.1
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
# posthog
beautifulsoup4==4.13.3
beautifulsoup4==4.13.4
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
certifi==2025.1.31
cachetools==5.5.2
# via
# -c requirements/common-constraints.txt
# google-auth
certifi==2025.4.26
# via
# -c requirements/common-constraints.txt
# httpcore
@@ -48,7 +52,7 @@ cffi==1.17.1
# -c requirements/common-constraints.txt
# sounddevice
# soundfile
charset-normalizer==3.4.1
charset-normalizer==3.4.2
# via
# -c requirements/common-constraints.txt
# requests
@@ -81,7 +85,7 @@ flake8==7.2.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
frozenlist==1.5.0
frozenlist==1.6.0
# via
# -c requirements/common-constraints.txt
# aiohttp
@@ -98,18 +102,67 @@ gitpython==3.1.44
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
google-ai-generativelanguage==0.6.15
# via
# -c requirements/common-constraints.txt
# google-generativeai
google-api-core[grpc]==2.24.2
# via
# -c requirements/common-constraints.txt
# google-ai-generativelanguage
# google-api-python-client
# google-generativeai
google-api-python-client==2.169.0
# via
# -c requirements/common-constraints.txt
# google-generativeai
google-auth==2.40.0
# via
# -c requirements/common-constraints.txt
# google-ai-generativelanguage
# google-api-core
# google-api-python-client
# google-auth-httplib2
# google-generativeai
google-auth-httplib2==0.2.0
# via
# -c requirements/common-constraints.txt
# google-api-python-client
google-generativeai==0.8.5
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
googleapis-common-protos==1.70.0
# via
# -c requirements/common-constraints.txt
# google-api-core
# grpcio-status
grep-ast==0.8.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
h11==0.14.0
grpcio==1.71.0
# via
# -c requirements/common-constraints.txt
# google-api-core
# grpcio-status
grpcio-status==1.71.0
# via
# -c requirements/common-constraints.txt
# google-api-core
h11==0.16.0
# via
# -c requirements/common-constraints.txt
# httpcore
httpcore==1.0.8
httpcore==1.0.9
# via
# -c requirements/common-constraints.txt
# httpx
httplib2==0.22.0
# via
# -c requirements/common-constraints.txt
# google-api-python-client
# google-auth-httplib2
httpx==0.28.1
# via
# -c requirements/common-constraints.txt
@@ -152,11 +205,11 @@ jsonschema==4.23.0
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
# litellm
jsonschema-specifications==2024.10.1
jsonschema-specifications==2025.4.1
# via
# -c requirements/common-constraints.txt
# jsonschema
litellm==1.65.7
litellm==1.68.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
@@ -180,10 +233,6 @@ mixpanel==4.10.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
monotonic==1.6
# via
# -c requirements/common-constraints.txt
# posthog
multidict==6.4.3
# via
# -c requirements/common-constraints.txt
@@ -198,7 +247,7 @@ numpy==1.26.4
# -c requirements/common-constraints.txt
# scipy
# soundfile
openai==1.73.0
openai==1.75.0
# via
# -c requirements/common-constraints.txt
# litellm
@@ -216,19 +265,19 @@ pexpect==4.9.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
pillow==11.1.0
pillow==11.2.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
pip==25.0.1
pip==25.1.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
posthog==3.24.1
posthog==4.0.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
prompt-toolkit==3.0.50
prompt-toolkit==3.0.51
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
@@ -237,6 +286,20 @@ propcache==0.3.1
# -c requirements/common-constraints.txt
# aiohttp
# yarl
proto-plus==1.26.1
# via
# -c requirements/common-constraints.txt
# google-ai-generativelanguage
# google-api-core
protobuf==5.29.4
# via
# -c requirements/common-constraints.txt
# google-ai-generativelanguage
# google-api-core
# google-generativeai
# googleapis-common-protos
# grpcio-status
# proto-plus
psutil==7.0.0
# via
# -c requirements/common-constraints.txt
@@ -245,6 +308,15 @@ ptyprocess==0.7.0
# via
# -c requirements/common-constraints.txt
# pexpect
pyasn1==0.6.1
# via
# -c requirements/common-constraints.txt
# pyasn1-modules
# rsa
pyasn1-modules==0.4.2
# via
# -c requirements/common-constraints.txt
# google-auth
pycodestyle==2.13.0
# via
# -c requirements/common-constraints.txt
@@ -253,12 +325,13 @@ pycparser==2.22
# via
# -c requirements/common-constraints.txt
# cffi
pydantic==2.11.3
pydantic==2.11.4
# via
# -c requirements/common-constraints.txt
# google-generativeai
# litellm
# openai
pydantic-core==2.33.1
pydantic-core==2.33.2
# via
# -c requirements/common-constraints.txt
# pydantic
@@ -278,6 +351,10 @@ pypandoc==1.15
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
pyparsing==3.2.3
# via
# -c requirements/common-constraints.txt
# httplib2
pyperclip==1.9.0
# via
# -c requirements/common-constraints.txt
@@ -307,6 +384,7 @@ regex==2024.11.6
requests==2.32.3
# via
# -c requirements/common-constraints.txt
# google-api-core
# huggingface-hub
# mixpanel
# posthog
@@ -320,6 +398,10 @@ rpds-py==0.24.0
# -c requirements/common-constraints.txt
# jsonschema
# referencing
rsa==4.9.1
# via
# -c requirements/common-constraints.txt
# google-auth
scipy==1.13.1
# via
# -c requirements/common-constraints.txt
@@ -351,7 +433,7 @@ soundfile==0.13.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
soupsieve==2.6
soupsieve==2.7
# via
# -c requirements/common-constraints.txt
# beautifulsoup4
@@ -366,6 +448,7 @@ tokenizers==0.21.1
tqdm==4.67.1
# via
# -c requirements/common-constraints.txt
# google-generativeai
# huggingface-hub
# openai
# via
@@ -379,7 +462,7 @@ tree-sitter-embedded-template==0.23.2
# via
# -c requirements/common-constraints.txt
# tree-sitter-language-pack
tree-sitter-language-pack==0.7.1
tree-sitter-language-pack==0.7.2
# via
# -c requirements/common-constraints.txt
# grep-ast
@@ -392,6 +475,7 @@ typing-extensions==4.13.2
# -c requirements/common-constraints.txt
# anyio
# beautifulsoup4
# google-generativeai
# huggingface-hub
# openai
# pydantic
@@ -402,6 +486,10 @@ typing-inspection==0.4.0
# via
# -c requirements/common-constraints.txt
# pydantic
uritemplate==4.1.1
# via
# -c requirements/common-constraints.txt
# google-api-python-client
urllib3==2.4.0
# via
# -c requirements/common-constraints.txt
@@ -415,7 +503,7 @@ wcwidth==0.2.13
# via
# -c requirements/common-constraints.txt
# prompt-toolkit
yarl==1.19.0
yarl==1.20.0
# via
# -c requirements/common-constraints.txt
# aiohttp

View File

@@ -2,7 +2,7 @@
# uv pip compile --no-strip-extras --output-file=requirements/common-constraints.txt requirements/requirements.in requirements/requirements-browser.in requirements/requirements-dev.in requirements/requirements-help.in requirements/requirements-playwright.in
aiohappyeyeballs==2.6.1
# via aiohttp
aiohttp==3.11.16
aiohttp==3.11.18
# via
# huggingface-hub
# litellm
@@ -27,9 +27,9 @@ backoff==2.2.1
# via
# -r requirements/requirements.in
# posthog
banks==2.1.1
banks==2.1.2
# via llama-index-core
beautifulsoup4==4.13.3
beautifulsoup4==4.13.4
# via -r requirements/requirements.in
blinker==1.9.0
# via streamlit
@@ -39,7 +39,7 @@ cachetools==5.5.2
# via
# google-auth
# streamlit
certifi==2025.1.31
certifi==2025.4.26
# via
# httpcore
# httpx
@@ -50,7 +50,7 @@ cffi==1.17.1
# soundfile
cfgv==3.4.0
# via pre-commit
charset-normalizer==3.4.1
charset-normalizer==3.4.2
# via requests
click==8.1.8
# via
@@ -67,7 +67,7 @@ colorama==0.4.6
# via griffe
configargparse==1.7
# via -r requirements/requirements.in
contourpy==1.3.1
contourpy==1.3.2
# via matplotlib
cycler==0.12.1
# via matplotlib
@@ -79,7 +79,7 @@ deprecated==1.2.18
# llama-index-core
diff-match-patch==20241021
# via -r requirements/requirements.in
dill==0.3.9
dill==0.4.0
# via
# multiprocess
# pathos
@@ -105,7 +105,7 @@ flake8==7.2.0
# via -r requirements/requirements.in
fonttools==4.57.0
# via matplotlib
frozenlist==1.5.0
frozenlist==1.6.0
# via
# aiohttp
# aiosignal
@@ -120,34 +120,49 @@ gitpython==3.1.44
# via
# -r requirements/requirements.in
# streamlit
google-ai-generativelanguage==0.6.15
# via google-generativeai
google-api-core[grpc]==2.24.2
# via
# google-ai-generativelanguage
# google-api-python-client
# google-cloud-bigquery
# google-cloud-core
google-auth==2.38.0
# google-generativeai
google-api-python-client==2.169.0
# via google-generativeai
google-auth==2.40.0
# via
# google-ai-generativelanguage
# google-api-core
# google-api-python-client
# google-auth-httplib2
# google-cloud-bigquery
# google-cloud-core
# google-generativeai
google-auth-httplib2==0.2.0
# via google-api-python-client
google-cloud-bigquery==3.31.0
# via -r requirements/requirements-dev.in
google-cloud-core==2.4.3
# via google-cloud-bigquery
google-crc32c==1.7.1
# via google-resumable-media
google-generativeai==0.8.5
# via -r requirements/requirements.in
google-resumable-media==2.7.2
# via google-cloud-bigquery
googleapis-common-protos==1.69.2
googleapis-common-protos==1.70.0
# via
# google-api-core
# grpcio-status
greenlet==3.1.1
greenlet==3.2.1
# via
# playwright
# sqlalchemy
grep-ast==0.8.1
# via -r requirements/requirements.in
griffe==1.7.2
griffe==1.7.3
# via banks
grpcio==1.71.0
# via
@@ -155,10 +170,14 @@ grpcio==1.71.0
# grpcio-status
grpcio-status==1.71.0
# via google-api-core
h11==0.14.0
h11==0.16.0
# via httpcore
httpcore==1.0.8
httpcore==1.0.9
# via httpx
httplib2==0.22.0
# via
# google-api-python-client
# google-auth-httplib2
httpx==0.28.1
# via
# litellm
@@ -170,7 +189,7 @@ huggingface-hub[inference]==0.30.2
# sentence-transformers
# tokenizers
# transformers
identify==2.6.9
identify==2.6.10
# via pre-commit
idna==3.10
# via
@@ -197,7 +216,7 @@ jinja2==3.1.6
# torch
jiter==0.9.0
# via openai
joblib==1.4.2
joblib==1.5.0
# via
# nltk
# scikit-learn
@@ -208,11 +227,11 @@ jsonschema==4.23.0
# -r requirements/requirements.in
# altair
# litellm
jsonschema-specifications==2024.10.1
jsonschema-specifications==2025.4.1
# via jsonschema
kiwisolver==1.4.8
# via matplotlib
litellm==1.65.7
litellm==1.68.0
# via -r requirements/requirements.in
llama-index-core==0.12.26
# via
@@ -236,19 +255,17 @@ mdurl==0.1.2
# via markdown-it-py
mixpanel==4.10.1
# via -r requirements/requirements.in
monotonic==1.6
# via posthog
mpmath==1.3.0
# via sympy
multidict==6.4.3
# via
# aiohttp
# yarl
multiprocess==0.70.17
multiprocess==0.70.18
# via pathos
mypy-extensions==1.0.0
mypy-extensions==1.1.0
# via typing-inspect
narwhals==1.34.1
narwhals==1.38.0
# via altair
nest-asyncio==1.6.0
# via llama-index-core
@@ -274,7 +291,7 @@ numpy==1.26.4
# soundfile
# streamlit
# transformers
openai==1.73.0
openai==1.75.0
# via litellm
packaging==24.2
# via
@@ -292,7 +309,7 @@ pandas==2.2.3
# via
# -r requirements/requirements-dev.in
# streamlit
pathos==0.3.3
pathos==0.3.4
# via lox
pathspec==0.12.1
# via
@@ -300,14 +317,14 @@ pathspec==0.12.1
# grep-ast
pexpect==4.9.0
# via -r requirements/requirements.in
pillow==11.1.0
pillow==11.2.1
# via
# -r requirements/requirements.in
# llama-index-core
# matplotlib
# sentence-transformers
# streamlit
pip==25.0.1
pip==25.1.1
# via
# -r requirements/requirements.in
# pip-tools
@@ -317,29 +334,33 @@ platformdirs==4.3.7
# via
# banks
# virtualenv
playwright==1.51.0
playwright==1.52.0
# via -r requirements/requirements-playwright.in
pluggy==1.5.0
# via pytest
posthog==3.24.1
posthog==4.0.1
# via -r requirements/requirements.in
pox==0.3.5
pox==0.3.6
# via pathos
ppft==1.7.6.9
ppft==1.7.7
# via pathos
pre-commit==4.2.0
# via -r requirements/requirements-dev.in
prompt-toolkit==3.0.50
prompt-toolkit==3.0.51
# via -r requirements/requirements.in
propcache==0.3.1
# via
# aiohttp
# yarl
proto-plus==1.26.1
# via google-api-core
# via
# google-ai-generativelanguage
# google-api-core
protobuf==5.29.4
# via
# google-ai-generativelanguage
# google-api-core
# google-generativeai
# googleapis-common-protos
# grpcio-status
# proto-plus
@@ -348,7 +369,7 @@ psutil==7.0.0
# via -r requirements/requirements.in
ptyprocess==0.7.0
# via pexpect
pyarrow==19.0.1
pyarrow==20.0.0
# via streamlit
pyasn1==0.6.1
# via
@@ -360,19 +381,20 @@ pycodestyle==2.13.0
# via flake8
pycparser==2.22
# via cffi
pydantic==2.11.3
pydantic==2.11.4
# via
# banks
# google-generativeai
# litellm
# llama-index-core
# openai
pydantic-core==2.33.1
pydantic-core==2.33.2
# via pydantic
pydeck==0.9.1
# via streamlit
pydub==0.25.1
# via -r requirements/requirements.in
pyee==12.1.1
pyee==13.0.0
# via playwright
pyflakes==3.3.2
# via flake8
@@ -381,7 +403,9 @@ pygments==2.19.1
pypandoc==1.15
# via -r requirements/requirements.in
pyparsing==3.2.3
# via matplotlib
# via
# httplib2
# matplotlib
pyperclip==1.9.0
# via -r requirements/requirements.in
pyproject-hooks==1.2.0
@@ -439,7 +463,7 @@ rpds-py==0.24.0
# via
# jsonschema
# referencing
rsa==4.9
rsa==4.9.1
# via google-auth
safetensors==0.5.3
# via transformers
@@ -452,9 +476,9 @@ scipy==1.13.1
# sentence-transformers
semver==3.0.4
# via -r requirements/requirements-dev.in
sentence-transformers==4.0.2
sentence-transformers==4.1.0
# via llama-index-embeddings-huggingface
setuptools==78.1.0
setuptools==80.3.1
# via pip-tools
shellingham==1.5.4
# via typer
@@ -475,13 +499,13 @@ sounddevice==0.5.1
# via -r requirements/requirements.in
soundfile==0.13.1
# via -r requirements/requirements.in
soupsieve==2.6
soupsieve==2.7
# via beautifulsoup4
sqlalchemy[asyncio]==2.0.40
# via llama-index-core
streamlit==1.44.1
streamlit==1.45.0
# via -r requirements/requirements-browser.in
sympy==1.13.3
sympy==1.14.0
# via torch
tenacity==9.1.2
# via
@@ -507,13 +531,14 @@ tornado==6.4.2
# via streamlit
tqdm==4.67.1
# via
# google-generativeai
# huggingface-hub
# llama-index-core
# nltk
# openai
# sentence-transformers
# transformers
transformers==4.51.2
transformers==4.51.3
# via sentence-transformers
tree-sitter==0.24.0
# via tree-sitter-language-pack
@@ -521,17 +546,18 @@ tree-sitter-c-sharp==0.23.1
# via tree-sitter-language-pack
tree-sitter-embedded-template==0.23.2
# via tree-sitter-language-pack
tree-sitter-language-pack==0.7.1
tree-sitter-language-pack==0.7.2
# via grep-ast
tree-sitter-yaml==0.7.0
# via tree-sitter-language-pack
typer==0.15.2
typer==0.15.3
# via -r requirements/requirements-dev.in
typing-extensions==4.13.2
# via
# altair
# anyio
# beautifulsoup4
# google-generativeai
# huggingface-hub
# llama-index-core
# openai
@@ -554,13 +580,15 @@ typing-inspection==0.4.0
# via pydantic
tzdata==2025.2
# via pandas
uritemplate==4.1.1
# via google-api-python-client
urllib3==2.4.0
# via
# mixpanel
# requests
uv==0.6.14
uv==0.7.2
# via -r requirements/requirements-dev.in
virtualenv==20.30.0
virtualenv==20.31.1
# via pre-commit
watchfiles==1.0.5
# via -r requirements/requirements.in
@@ -572,7 +600,7 @@ wrapt==1.17.2
# via
# deprecated
# llama-index-core
yarl==1.19.0
yarl==1.20.0
# via aiohttp
zipp==3.21.0
# via importlib-metadata

View File

@@ -17,11 +17,11 @@ cachetools==5.5.2
# via
# -c requirements/common-constraints.txt
# streamlit
certifi==2025.1.31
certifi==2025.4.26
# via
# -c requirements/common-constraints.txt
# requests
charset-normalizer==3.4.1
charset-normalizer==3.4.2
# via
# -c requirements/common-constraints.txt
# requests
@@ -50,7 +50,7 @@ jsonschema==4.23.0
# via
# -c requirements/common-constraints.txt
# altair
jsonschema-specifications==2024.10.1
jsonschema-specifications==2025.4.1
# via
# -c requirements/common-constraints.txt
# jsonschema
@@ -58,7 +58,7 @@ markupsafe==3.0.2
# via
# -c requirements/common-constraints.txt
# jinja2
narwhals==1.34.1
narwhals==1.38.0
# via
# -c requirements/common-constraints.txt
# altair
@@ -77,7 +77,7 @@ pandas==2.2.3
# via
# -c requirements/common-constraints.txt
# streamlit
pillow==11.1.0
pillow==11.2.1
# via
# -c requirements/common-constraints.txt
# streamlit
@@ -85,7 +85,7 @@ protobuf==5.29.4
# via
# -c requirements/common-constraints.txt
# streamlit
pyarrow==19.0.1
pyarrow==20.0.0
# via
# -c requirements/common-constraints.txt
# streamlit
@@ -123,7 +123,7 @@ smmap==5.0.2
# via
# -c requirements/common-constraints.txt
# gitdb
streamlit==1.44.1
streamlit==1.45.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-browser.in

View File

@@ -8,7 +8,7 @@ cachetools==5.5.2
# via
# -c requirements/common-constraints.txt
# google-auth
certifi==2025.1.31
certifi==2025.4.26
# via
# -c requirements/common-constraints.txt
# requests
@@ -16,7 +16,7 @@ cfgv==3.4.0
# via
# -c requirements/common-constraints.txt
# pre-commit
charset-normalizer==3.4.1
charset-normalizer==3.4.2
# via
# -c requirements/common-constraints.txt
# requests
@@ -33,7 +33,7 @@ cogapp==3.4.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
contourpy==1.3.1
contourpy==1.3.2
# via
# -c requirements/common-constraints.txt
# matplotlib
@@ -41,7 +41,7 @@ cycler==0.12.1
# via
# -c requirements/common-constraints.txt
# matplotlib
dill==0.3.9
dill==0.4.0
# via
# -c requirements/common-constraints.txt
# multiprocess
@@ -63,7 +63,7 @@ google-api-core[grpc]==2.24.2
# -c requirements/common-constraints.txt
# google-cloud-bigquery
# google-cloud-core
google-auth==2.38.0
google-auth==2.40.0
# via
# -c requirements/common-constraints.txt
# google-api-core
@@ -85,7 +85,7 @@ google-resumable-media==2.7.2
# via
# -c requirements/common-constraints.txt
# google-cloud-bigquery
googleapis-common-protos==1.69.2
googleapis-common-protos==1.70.0
# via
# -c requirements/common-constraints.txt
# google-api-core
@@ -99,7 +99,7 @@ grpcio-status==1.71.0
# via
# -c requirements/common-constraints.txt
# google-api-core
identify==2.6.9
identify==2.6.10
# via
# -c requirements/common-constraints.txt
# pre-commit
@@ -135,7 +135,7 @@ mdurl==0.1.2
# via
# -c requirements/common-constraints.txt
# markdown-it-py
multiprocess==0.70.17
multiprocess==0.70.18
# via
# -c requirements/common-constraints.txt
# pathos
@@ -160,15 +160,15 @@ pandas==2.2.3
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
pathos==0.3.3
pathos==0.3.4
# via
# -c requirements/common-constraints.txt
# lox
pillow==11.1.0
pillow==11.2.1
# via
# -c requirements/common-constraints.txt
# matplotlib
pip==25.0.1
pip==25.1.1
# via
# -c requirements/common-constraints.txt
# pip-tools
@@ -184,11 +184,11 @@ pluggy==1.5.0
# via
# -c requirements/common-constraints.txt
# pytest
pox==0.3.5
pox==0.3.6
# via
# -c requirements/common-constraints.txt
# pathos
ppft==1.7.6.9
ppft==1.7.7
# via
# -c requirements/common-constraints.txt
# pathos
@@ -261,7 +261,7 @@ rich==14.0.0
# via
# -c requirements/common-constraints.txt
# typer
rsa==4.9
rsa==4.9.1
# via
# -c requirements/common-constraints.txt
# google-auth
@@ -269,7 +269,7 @@ semver==3.0.4
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
setuptools==78.1.0
setuptools==80.3.1
# via
# -c requirements/common-constraints.txt
# pip-tools
@@ -281,7 +281,7 @@ six==1.17.0
# via
# -c requirements/common-constraints.txt
# python-dateutil
typer==0.15.2
typer==0.15.3
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
@@ -297,11 +297,11 @@ urllib3==2.4.0
# via
# -c requirements/common-constraints.txt
# requests
uv==0.6.14
uv==0.7.2
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
virtualenv==20.30.0
virtualenv==20.31.1
# via
# -c requirements/common-constraints.txt
# pre-commit

View File

@@ -4,7 +4,7 @@ aiohappyeyeballs==2.6.1
# via
# -c requirements/common-constraints.txt
# aiohttp
aiohttp==3.11.16
aiohttp==3.11.18
# via
# -c requirements/common-constraints.txt
# huggingface-hub
@@ -25,17 +25,17 @@ attrs==25.3.0
# via
# -c requirements/common-constraints.txt
# aiohttp
banks==2.1.1
banks==2.1.2
# via
# -c requirements/common-constraints.txt
# llama-index-core
certifi==2025.1.31
certifi==2025.4.26
# via
# -c requirements/common-constraints.txt
# httpcore
# httpx
# requests
charset-normalizer==3.4.1
charset-normalizer==3.4.2
# via
# -c requirements/common-constraints.txt
# requests
@@ -70,7 +70,7 @@ filetype==1.2.0
# via
# -c requirements/common-constraints.txt
# llama-index-core
frozenlist==1.5.0
frozenlist==1.6.0
# via
# -c requirements/common-constraints.txt
# aiohttp
@@ -81,19 +81,19 @@ fsspec==2025.3.2
# huggingface-hub
# llama-index-core
# torch
greenlet==3.1.1
greenlet==3.2.1
# via
# -c requirements/common-constraints.txt
# sqlalchemy
griffe==1.7.2
griffe==1.7.3
# via
# -c requirements/common-constraints.txt
# banks
h11==0.14.0
h11==0.16.0
# via
# -c requirements/common-constraints.txt
# httpcore
httpcore==1.0.8
httpcore==1.0.9
# via
# -c requirements/common-constraints.txt
# httpx
@@ -120,7 +120,7 @@ jinja2==3.1.6
# -c requirements/common-constraints.txt
# banks
# torch
joblib==1.4.2
joblib==1.5.0
# via
# -c requirements/common-constraints.txt
# nltk
@@ -151,7 +151,7 @@ multidict==6.4.3
# -c requirements/common-constraints.txt
# aiohttp
# yarl
mypy-extensions==1.0.0
mypy-extensions==1.1.0
# via
# -c requirements/common-constraints.txt
# typing-inspect
@@ -182,7 +182,7 @@ packaging==24.2
# huggingface-hub
# marshmallow
# transformers
pillow==11.1.0
pillow==11.2.1
# via
# -c requirements/common-constraints.txt
# llama-index-core
@@ -196,12 +196,12 @@ propcache==0.3.1
# -c requirements/common-constraints.txt
# aiohttp
# yarl
pydantic==2.11.3
pydantic==2.11.4
# via
# -c requirements/common-constraints.txt
# banks
# llama-index-core
pydantic-core==2.33.1
pydantic-core==2.33.2
# via
# -c requirements/common-constraints.txt
# pydantic
@@ -237,7 +237,7 @@ scipy==1.13.1
# -c requirements/common-constraints.txt
# scikit-learn
# sentence-transformers
sentence-transformers==4.0.2
sentence-transformers==4.1.0
# via
# -c requirements/common-constraints.txt
# llama-index-embeddings-huggingface
@@ -249,7 +249,7 @@ sqlalchemy[asyncio]==2.0.40
# via
# -c requirements/common-constraints.txt
# llama-index-core
sympy==1.13.3
sympy==1.14.0
# via
# -c requirements/common-constraints.txt
# torch
@@ -282,7 +282,7 @@ tqdm==4.67.1
# nltk
# sentence-transformers
# transformers
transformers==4.51.2
transformers==4.51.3
# via
# -c requirements/common-constraints.txt
# sentence-transformers
@@ -317,7 +317,7 @@ wrapt==1.17.2
# -c requirements/common-constraints.txt
# deprecated
# llama-index-core
yarl==1.19.0
yarl==1.20.0
# via
# -c requirements/common-constraints.txt
# aiohttp

View File

@@ -1,14 +1,14 @@
# This file was autogenerated by uv via the following command:
# uv pip compile --no-strip-extras --constraint=requirements/common-constraints.txt --output-file=requirements/requirements-playwright.txt requirements/requirements-playwright.in
greenlet==3.1.1
greenlet==3.2.1
# via
# -c requirements/common-constraints.txt
# playwright
playwright==1.51.0
playwright==1.52.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-playwright.in
pyee==12.1.1
pyee==13.0.0
# via
# -c requirements/common-constraints.txt
# playwright

View File

@@ -28,6 +28,7 @@ watchfiles
socksio
pip
pillow
google-generativeai
# The proper dependency is networkx[default], but this brings
# in matplotlib and a bunch of other deps

258
scripts/clean_metadata.py Executable file
View File

@@ -0,0 +1,258 @@
#!/usr/bin/env python
import difflib
import json
import re
from pathlib import Path
import json5
def find_block_lines(lines, key_to_remove):
"""Finds the start and end line indices for a top-level key's block."""
start_line_idx = -1
# Regex to find the line starting the key definition, allowing for whitespace
# and ensuring it's the key we want (e.g., avoid matching "key1_extra": ...)
key_pattern = re.compile(r'^\s*"' + re.escape(key_to_remove) + r'"\s*:\s*{?')
for i, line in enumerate(lines):
if key_pattern.match(line.strip()):
start_line_idx = i
break
if start_line_idx == -1:
# Key might not start with '{' on the same line, check if it starts immediately after
key_pattern_no_brace = re.compile(r'^\s*"' + re.escape(key_to_remove) + r'"\s*:\s*$')
for i, line in enumerate(lines):
if key_pattern_no_brace.match(line.strip()):
# Look for the opening brace on the next non-empty/comment line
j = i + 1
while j < len(lines):
stripped_next_line = lines[j].strip()
if not stripped_next_line or stripped_next_line.startswith("//"):
j += 1
continue
if stripped_next_line.startswith("{"):
start_line_idx = i # Start from the key definition line
break
else:
# False alarm, the line after the key wasn't '{'
break
if start_line_idx != -1:
break
if start_line_idx == -1:
print(
f"Warning: Could not reliably find start line for '{key_to_remove}'. Skipping removal."
)
return None, None # Key block start not found clearly
brace_level = 0
in_string = False
block_started = False
end_line_idx = -1
# Start brace counting from the identified start line
for i in range(start_line_idx, len(lines)):
line = lines[i]
# Simple brace counting - might be fooled by braces in comments or strings
# This is a limitation of pure text processing without full parsing
for char_idx, char in enumerate(line):
# Rudimentary string detection
if char == '"':
# Check if preceded by an odd number of backslashes (escaped quote)
backslashes = 0
temp_idx = char_idx - 1
while temp_idx >= 0 and line[temp_idx] == "\\":
backslashes += 1
temp_idx -= 1
if backslashes % 2 == 0:
in_string = not in_string
if not in_string:
if char == "{":
brace_level += 1
block_started = True # Mark that we've entered the block
elif char == "}":
brace_level -= 1
# Check if the block ends *after* processing the entire line
if block_started and brace_level == 0:
end_line_idx = i
break
if end_line_idx == -1:
print(
f"Warning: Could not find end of block for '{key_to_remove}' starting at line"
f" {start_line_idx + 1}. Skipping removal."
)
return None, None # Block end not found
return start_line_idx, end_line_idx
def remove_block_surgically(file_path, key_to_remove):
"""Reads the file, removes the block for the key, writes back."""
try:
# Read with universal newlines, but keep track for writing
with open(file_path, "r") as f:
content = f.read()
lines = content.splitlines(keepends=True) # Keep original line endings
except Exception as e:
print(f"Error reading {file_path} for removal: {e}")
return False
start_idx, end_idx = find_block_lines(lines, key_to_remove)
if start_idx is None or end_idx is None:
return False # Error message already printed by find_block_lines
# Prepare the lines to be written, excluding the identified block
output_lines = lines[:start_idx] + lines[end_idx + 1 :]
# Note: Comma handling is omitted for simplicity. User may need manual fix.
try:
with open(file_path, "w") as f:
f.writelines(output_lines)
print(f"Successfully removed '{key_to_remove}' block and updated {file_path}.")
return True
except Exception as e:
print(f"Error writing updated data to {file_path} after removing {key_to_remove}: {e}")
return False
def main():
script_dir = Path(__file__).parent.resolve()
# Adjust path relative to the script's location in the aider repo
litellm_path = script_dir.parent / "../litellm/model_prices_and_context_window.json"
aider_path = script_dir / "../aider/resources/model-metadata.json"
if not litellm_path.exists():
print(f"Error: LiteLLM metadata file not found at {litellm_path}")
return
if not aider_path.exists():
print(f"Error: Aider metadata file not found at {aider_path}")
return
try:
with open(litellm_path, "r") as f:
litellm_data = json.load(f)
except json.JSONDecodeError as e:
print(f"Error decoding JSON from {litellm_path}: {e}")
return
except Exception as e:
print(f"Error reading {litellm_path}: {e}")
return
try:
# Use json5 for the aider metadata file as it might contain comments
with open(aider_path, "r") as f:
aider_data = json5.load(f)
    except ValueError as e:  # json5 raises ValueError (json.JSONDecodeError subclasses it)
print(f"Error decoding JSON from {aider_path}: {e}")
return
except Exception as e:
print(f"Error reading {aider_path}: {e}")
return
litellm_keys = set(litellm_data.keys())
aider_keys = set(aider_data.keys())
common_keys = sorted(list(litellm_keys.intersection(aider_keys)))
removed_count = 0
if common_keys:
print("Comparing common models found in both files:\n")
for key in common_keys:
print(f"--- {key} (aider) ---")
print(f"+++ {key} (litellm) +++")
litellm_entry = litellm_data.get(key, {})
aider_entry = aider_data.get(key, {})
# Convert dicts to formatted JSON strings for comparison
# First, compare the dictionaries directly for semantic equality
if litellm_entry == aider_entry:
print(f"'{key}': Entries are semantically identical.")
print("\n" + "=" * 40)
print("-" * 40 + "\n") # Separator for the next model
continue # Skip diff and removal prompt for identical entries
# Generate unified diff
# If dictionaries differ, generate JSON strings to show the diff
# Add a dummy key to ensure the *real* last key gets a comma
litellm_entry_copy = litellm_entry.copy()
aider_entry_copy = aider_entry.copy()
dummy_key = "zzzdummykey"
litellm_entry_copy[dummy_key] = True
aider_entry_copy[dummy_key] = True
litellm_json_lines = json.dumps(
litellm_entry_copy, indent=4, sort_keys=True
).splitlines()
aider_json_lines = json.dumps(aider_entry_copy, indent=4, sort_keys=True).splitlines()
# Remove the dummy key line before diffing
litellm_json_filtered = [line for line in litellm_json_lines if dummy_key not in line]
aider_json_filtered = [line for line in aider_json_lines if dummy_key not in line]
diff = difflib.unified_diff(
aider_json_filtered,
litellm_json_filtered,
fromfile=f"{key} (aider)",
tofile=f"{key} (litellm)",
lineterm="",
n=max(len(litellm_json_filtered), len(aider_json_filtered)), # Show all lines
)
# Print the diff, skipping the header lines generated by unified_diff
diff_lines = list(diff)[2:]
if not diff_lines:
# This case should ideally not be reached if dict comparison was done first,
# but kept as a fallback.
print(
"(No textual differences found, though dictionaries might differ in type/order)"
)
else:
for line in diff_lines:
# Add color for better readability (optional, requires a library
# like 'termcolor' or manual ANSI codes)
# Simple +/- indication is standard for diffs
print(line)
print("\n" + "=" * 40)
# Ask user if they want to remove the entry from aider's metadata
response = (
input(f"Remove '{key}' from aider/resources/model-metadata.json? (y/N): ")
.strip()
.lower()
)
if response == "y":
# Perform surgical removal from the text file
if remove_block_surgically(aider_path, key):
removed_count += 1
# Optional: Also remove from the in-memory dict if needed later,
# but it's not strictly necessary if we reload or finish now.
# if key in aider_data: del aider_data[key]
else:
print(f"Failed to remove '{key}' block surgically.")
# Key might still be in aider_data if removal failed
else:
print(f"Keeping '{key}'.")
print("-" * 40 + "\n") # Separator for the next model
else:
print("No common models found between the two files.")
return # Exit if no common keys
# Final summary message
if removed_count > 0:
print(f"\nFinished comparing. A total of {removed_count} entr(y/ies) were removed.")
else:
print("\nFinished comparing. No entries were removed.")
if __name__ == "__main__":
main()
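A hypothetical way to run the new script, assuming a `litellm` checkout sits alongside the aider repo (the script resolves both paths relative to its own location) and the `json5` package is installed:
```bash
pip install json5                   # parser used for aider's commented metadata file
python scripts/clean_metadata.py    # prompts y/N for each differing model entry
```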

View File

@@ -108,29 +108,6 @@ Hope you like it!
edits = list(eb.find_original_update_blocks(edit))
self.assertEqual(edits, [("foo.txt", "Two\n", "Tooooo\n")])
def test_find_original_update_blocks_mangled_filename_w_source_tag(self):
source = "source"
edit = """
Here's the change:
<%s>foo.txt
<<<<<<< SEARCH
One
=======
Two
>>>>>>> REPLACE
</%s>
Hope you like it!
""" % (source, source)
fence = ("<%s>" % source, "</%s>" % source)
with self.assertRaises(ValueError) as cm:
_edits = list(eb.find_original_update_blocks(edit, fence))
self.assertIn("missing filename", str(cm.exception))
def test_find_original_update_blocks_quote_below_filename(self):
edit = """
Here's the change:
@@ -181,10 +158,11 @@ Tooooo
oops!
>>>>>>> REPLACE
"""
with self.assertRaises(ValueError) as cm:
list(eb.find_original_update_blocks(edit))
_blocks = list(eb.find_original_update_blocks(edit))
self.assertIn("filename", str(cm.exception))
def test_find_original_update_blocks_no_final_newline(self):
@@ -575,7 +553,7 @@ Hope you like it!
edits = list(eb.find_original_update_blocks(edit, fence=quad_backticks))
self.assertEqual(edits, [("foo.txt", "", "Tooooo\n")])
#Test for shell script blocks with sh language identifier (issue #3785)
# Test for shell script blocks with sh language identifier (issue #3785)
def test_find_original_update_blocks_with_sh_language_identifier(self):
# https://github.com/Aider-AI/aider/issues/3785
edit = """
@@ -609,13 +587,13 @@ exit 0
# Check that the content contains the expected shell script elements
result_content = edits[0][2]
self.assertIn("#!/bin/bash", result_content)
self.assertIn("if [ \"$#\" -ne 1 ];", result_content)
self.assertIn("echo \"Usage: $0 <argument>\"", result_content)
self.assertIn('if [ "$#" -ne 1 ];', result_content)
self.assertIn('echo "Usage: $0 <argument>"', result_content)
self.assertIn("exit 1", result_content)
self.assertIn("echo \"$1\"", result_content)
self.assertIn('echo "$1"', result_content)
self.assertIn("exit 0", result_content)
#Test for C# code blocks with csharp language identifier
# Test for C# code blocks with csharp language identifier
def test_find_original_update_blocks_with_csharp_language_identifier(self):
edit = """
Here's a C# code change:
@@ -631,12 +609,9 @@ Console.WriteLine("Hello, C# World!");
"""
edits = list(eb.find_original_update_blocks(edit))
search_text = "Console.WriteLine(\"Hello World!\");\n"
replace_text = "Console.WriteLine(\"Hello, C# World!\");\n"
self.assertEqual(
edits,
[("Program.cs", search_text, replace_text)]
)
search_text = 'Console.WriteLine("Hello World!");\n'
replace_text = 'Console.WriteLine("Hello, C# World!");\n'
self.assertEqual(edits, [("Program.cs", search_text, replace_text)])
if __name__ == "__main__":