Compare commits

...

133 Commits

Author SHA1 Message Date
Paul Gauthier
9518193d0a set version to 0.81.4.dev 2025-04-12 13:25:55 -07:00
Paul Gauthier
60a2b799e6 version bump to 0.81.3 2025-04-12 13:25:52 -07:00
Paul Gauthier
9d7dc00f25 copy 2025-04-12 08:58:50 -07:00
Paul Gauthier
882e7b6716 bump deps 2025-04-12 08:58:09 -07:00
Paul Gauthier
8ba29ee8e6 copy 2025-04-12 08:57:18 -07:00
Paul Gauthier
3f67c41759 copy 2025-04-12 08:09:46 -07:00
Paul Gauthier
7fbeafa1cf copy 2025-04-12 08:06:39 -07:00
Paul Gauthier
028257480b copy 2025-04-11 14:26:04 +12:00
Paul Gauthier
e42a0c45b6 Merge branch 'main' of github.com:Aider-AI/aider 2025-04-11 14:18:07 +12:00
Paul Gauthier
1e7f8549ff add grok3mini high 2025-04-11 14:11:39 +12:00
paul-gauthier
668de71f9d Merge pull request #3776 from peterhadlaw/master
Do not lowercase the _entirety_ of the commit message
2025-04-11 14:01:03 +12:00
Paul Gauthier
067245b810 chore: update grok model settings to remove/comment out params 2025-04-11 13:41:11 +12:00
Peter Hadlaw
8f236c69e1 fix: Do not lowercase the entirety of the commit message 2025-04-10 20:08:40 -05:00
Paul Gauthier
8ee33da114 copy 2025-04-11 09:08:06 +12:00
Paul Gauthier (aider)
2fedc2e699 feat: update openrouter badges to link to options menu 2025-04-11 08:38:22 +12:00
Paul Gauthier
1961543e2f set version to 0.81.3.dev 2025-04-11 08:37:59 +12:00
Paul Gauthier
b4f65734a5 version bump to 0.81.2 2025-04-11 08:37:56 +12:00
Paul Gauthier (aider)
0eb80553f6 style: Apply linter to versionbump.py 2025-04-11 08:37:37 +12:00
Paul Gauthier (aider)
110c63ae95 feat: Add --force flag to skip pre-push checks 2025-04-11 08:37:33 +12:00
Paul Gauthier
57304536bf copy 2025-04-11 08:23:16 +12:00
Paul Gauthier
a9ca5da139 feat: add grok-3-mini-beta to model settings with reasoning_effort 2025-04-11 08:22:44 +12:00
Paul Gauthier
947aebfbe0 copy 2025-04-11 08:13:15 +12:00
Paul Gauthier
fafc9268d4 feat: set grok-3-mini-beta edit_format to whole 2025-04-11 08:12:38 +12:00
Paul Gauthier
65a5d55436 feat: add grok-3-beta and grok-3-mini-beta model settings 2025-04-11 08:10:18 +12:00
Paul Gauthier (aider)
96b350400f feat: make highlight parameter case-insensitive in leaderboard 2025-04-11 07:28:01 +12:00
Paul Gauthier
7983b4caf2 copy 2025-04-11 07:25:10 +12:00
Paul Gauthier (aider)
e44122f1be fix: correct groq to grok typo in model settings yaml 2025-04-11 07:23:16 +12:00
Paul Gauthier (aider)
42618d7ec6 fix: correct groq to grok typos in model names and aliases 2025-04-11 07:23:05 +12:00
Paul Gauthier
1d0167bbf4 feat: add polyglot leaderboard entries for grok3 and optimus alpha 2025-04-11 07:17:56 +12:00
Paul Gauthier (aider)
43d4b21b23 feat: add "optimus" alias for openrouter model 2025-04-11 07:12:27 +12:00
Paul Gauthier (aider)
562171c548 feat: add grok3 alias for xai/groq-3-beta 2025-04-11 07:12:09 +12:00
Paul Gauthier (aider)
8dccecdd9f feat: add xai/groq-3-beta and xai/groq-3-mini-beta models 2025-04-11 07:10:23 +12:00
Paul Gauthier
940ae364d7 feat: add model settings for grok-3-beta, grok-3-mini-beta, optimus-alpha 2025-04-11 07:09:27 +12:00
Paul Gauthier (aider)
532bc454c5 feat: add openrouter/openrouter/optimus-alpha model metadata 2025-04-11 06:54:12 +12:00
Paul Gauthier (aider)
14ffe7782c feat: add openrouter/x-ai/grok-3-mini-beta model metadata 2025-04-11 06:37:33 +12:00
Paul Gauthier (aider)
2dd40fce44 feat: add openrouter/x-ai/grok-3-beta model metadata 2025-04-10 16:18:22 +12:00
Paul Gauthier (aider)
0c8bc46e28 fix: strip trailing } from urls extracted from error messages 2025-04-10 16:06:07 +12:00
Paul Gauthier
7d0dd29937 Merge branch 'main' of github.com:Aider-AI/aider 2025-04-08 18:33:41 +12:00
Paul Gauthier
349cd77821 copy 2025-04-08 08:10:21 +12:00
paul-gauthier
dc2d7b1dfe Merge pull request #3752 from tylersatre/main 2025-04-08 06:31:41 +12:00
Tyler Satre
be30329288 Update azure documentation 2025-04-07 11:04:15 -04:00
Paul Gauthier
71446d9f3c fix: get_file_mentions skips pure basenames with duplicates 2025-04-07 13:22:05 +12:00
Paul Gauthier (aider)
c9d4c8d09b fix: allow adding files by full path with existing basename 2025-04-07 13:19:25 +12:00
Paul Gauthier (aider)
c580ffdb70 test: add test for multiline backtick file mentions 2025-04-07 13:14:27 +12:00
Paul Gauthier
f46deb4eb7 improve diff-fenced prompts 2025-04-07 08:54:01 +12:00
Paul Gauthier
b3215bed48 copy 2025-04-07 08:13:22 +12:00
Paul Gauthier
2a9ab02753 chore: update polyglot leaderboard data 2025-04-07 08:12:37 +12:00
Paul Gauthier (aider)
0da586154d fix: quote values with '#' in sample aider.conf.yml config 2025-04-07 08:09:33 +12:00
Paul Gauthier
26d736551d Merge branch 'main' of github.com:Aider-AI/aider 2025-04-07 08:05:15 +12:00
paul-gauthier
9445a3118b Merge pull request #3733 from banjo/main 2025-04-06 07:28:56 +12:00
paul-gauthier
a2c46c7436 Merge pull request #3735 from KennyDizi/main 2025-04-06 07:20:22 +12:00
paul-gauthier
8df7a0960e Merge pull request #3736 from FelixLisczyk/tl-173 2025-04-06 07:19:30 +12:00
Felix Lisczyk
e7f35e7a35 Add Fireworks AI model 'deepseek-v3-0324' 2025-04-05 16:33:09 +02:00
Kenny Dizi
088e80e38b Add support model openrouter/google/gemini-2.5-pro-preview-03-25 2025-04-05 20:53:06 +07:00
Kenny Dizi
2d65c7f387 Remove trailing spaces 2025-04-05 20:51:11 +07:00
Anton Ödman
94db758eb7 fix: follow conventional commits examples by going all lowercase 2025-04-05 13:07:34 +02:00
Paul Gauthier
2bfb615d68 set version to 0.81.2.dev 2025-04-05 09:01:12 +13:00
Paul Gauthier
87275140f9 version bump to 0.81.1 2025-04-05 09:01:09 +13:00
Paul Gauthier
0672a68ba4 copy 2025-04-05 08:58:10 +13:00
Paul Gauthier (aider)
246e3ccfad refactor: Rename gemini-free alias to gemini-exp in MODEL_ALIASES 2025-04-05 08:56:56 +13:00
Paul Gauthier (aider)
b275ee919f feat: Update gemini model alias and add gemini-free alias 2025-04-05 08:56:35 +13:00
Paul Gauthier (aider)
eda796d5e0 feat: Add metadata and settings for gemini-2.5-pro-preview-03-25 2025-04-05 08:54:45 +13:00
Paul Gauthier
d1b3917309 copy 2025-04-04 22:08:49 +13:00
Paul Gauthier
ffee2b971f blame 2025-04-04 22:08:08 +13:00
Paul Gauthier
b9a80f9c8c set version to 0.81.1.dev 2025-04-04 22:02:37 +13:00
Paul Gauthier
980f673ce2 version bump to 0.81.0 2025-04-04 22:02:34 +13:00
Paul Gauthier
55767a0003 copy 2025-04-04 21:56:25 +13:00
Paul Gauthier
fb44bebe40 copy 2025-04-04 21:53:47 +13:00
Paul Gauthier (aider)
b79f072499 feat: Add alias "quasar" for openrouter/openrouter/quasar-alpha 2025-04-04 21:53:12 +13:00
Paul Gauthier (aider)
d65a2e8b51 fix: Exclude double quotes from detected URLs 2025-04-04 21:48:17 +13:00
Paul Gauthier (aider)
e0b42d51db fix: Do not retry litellm.APIError for insufficient credits. 2025-04-04 21:45:56 +13:00
Paul Gauthier (aider)
c057dc9466 Feat: Add model metadata for openrouter/openrouter/quasar-alpha 2025-04-04 21:43:19 +13:00
Paul Gauthier (aider)
fff53a94d3 fix: Import offer_openrouter_oauth from aider/onboarding.py 2025-04-04 21:41:40 +13:00
Paul Gauthier (aider)
12beedd0a6 style: Run linter to fix line lengths and formatting issues 2025-04-04 21:40:47 +13:00
Paul Gauthier (aider)
80f60a7394 feat: Offer OpenRouter OAuth if model specified but API key is missing 2025-04-04 21:40:41 +13:00
Paul Gauthier
2359348505 copy 2025-04-04 21:39:47 +13:00
Paul Gauthier
63e3e06a8c copy 2025-04-04 18:53:12 +13:00
Paul Gauthier
dca92b580c add openrouter/openrouter/quasar-alpha 2025-04-04 18:52:59 +13:00
Paul Gauthier
24e2960092 add openrouter/openrouter/quasar-alpha 2025-04-04 18:52:16 +13:00
Paul Gauthier (aider)
be1a52c5c1 feat: Read highlight model from query string 2025-04-04 16:11:54 +13:00
Paul Gauthier
8a34a6c8f4 set version to 0.80.5.dev 2025-04-04 15:34:08 +13:00
Paul Gauthier
7924ea9bb9 version bump to 0.80.4 2025-04-04 15:34:04 +13:00
Paul Gauthier
a3a17ae792 copy 2025-04-04 15:31:04 +13:00
Paul Gauthier
f8801d811b feat: Remove max_tokens from deepseek model settings 2025-04-04 15:25:36 +13:00
Paul Gauthier
425284ac62 copy 2025-04-04 15:09:08 +13:00
Paul Gauthier
4872cdf905 copy 2025-04-04 15:08:21 +13:00
Paul Gauthier
88cd81c692 set version to 0.80.4.dev 2025-04-04 08:30:42 +13:00
Paul Gauthier
d45ecd0800 version bump to 0.80.3 2025-04-04 08:30:39 +13:00
Paul Gauthier
4bfcef60f4 copy 2025-04-04 07:58:59 +13:00
Paul Gauthier
e9b7e933f5 copy 2025-04-04 07:54:07 +13:00
Paul Gauthier
e5301cef49 copy 2025-04-04 07:52:16 +13:00
Paul Gauthier
01ca552174 copy 2025-04-04 07:49:36 +13:00
Paul Gauthier
4529d73bf3 feat: Add model metadata for openrouter/google/gemini-2.0-flash-exp:free 2025-04-03 08:43:19 +13:00
Paul Gauthier
0798906a51 Merge branch 'main' into gemini-weak-flash 2025-04-03 08:34:41 +13:00
Paul Gauthier
8547c24dac set version to 0.80.3.dev 2025-04-03 08:33:40 +13:00
Paul Gauthier
0e1e1aae2e version bump to 0.80.2 2025-04-03 08:33:36 +13:00
Paul Gauthier (aider)
9cc31e4087 feat: Configure weak models for Gemini 2.5 Pro 2025-04-03 08:12:27 +13:00
Paul Gauthier
e9c7555bb9 chore: Add TODO comment for Gemini 2.5 Pro models 2025-04-03 08:12:20 +13:00
Paul Gauthier
6f897fec59 copy 2025-04-03 08:10:13 +13:00
Paul Gauthier
8c3d77f4c7 bump deps to pickup https://github.com/BerriAI/litellm/pull/9667 2025-04-03 08:07:30 +13:00
Paul Gauthier
f9b60d83ac copy 2025-04-02 20:15:37 +13:00
Paul Gauthier (aider)
3992681b84 ci: Add Windows workflow to check PyPI version 2025-04-01 21:19:14 +13:00
Paul Gauthier
340bd78259 Revert "ci: Add Windows to check_pypi_version matrix and improve compatibility"
This reverts commit 12a46275a2.
2025-04-01 21:18:31 +13:00
Paul Gauthier (aider)
12a46275a2 ci: Add Windows to check_pypi_version matrix and improve compatibility 2025-04-01 21:17:28 +13:00
Paul Gauthier
b56234f1c9 copy 2025-04-01 21:15:25 +13:00
Paul Gauthier (aider)
60859ec2b9 ci: Fix latest tag detection to exclude dev tags 2025-04-01 21:14:24 +13:00
Paul Gauthier
0a840860f1 docs: Add comment explaining PyPI check workflow purpose 2025-04-01 21:14:17 +13:00
Paul Gauthier (aider)
cebae18dd6 ci: Correct version extraction in check_pypi_version workflow 2025-04-01 21:12:24 +13:00
Paul Gauthier (aider)
9c9c6b6591 ci: Improve robustness of aider version check in CI 2025-04-01 21:10:36 +13:00
Paul Gauthier (aider)
ca0ffc66d1 ci: Run check_pypi_version job across Python 3.9-3.12 2025-04-01 21:08:17 +13:00
Paul Gauthier (aider)
b0623f04fe ci: Add GitHub Action to verify PyPI version matches latest tag 2025-04-01 21:03:20 +13:00
Paul Gauthier
2dec862ea6 copy 2025-04-01 17:08:27 +13:00
Paul Gauthier
f18fe53a9a set version to 0.80.2.dev 2025-04-01 17:06:41 +13:00
Paul Gauthier
73348de2b4 version bump to 0.80.1 2025-04-01 17:06:37 +13:00
Paul Gauthier
f4a418bfcd copy 2025-04-01 17:03:58 +13:00
Paul Gauthier
50588800f5 copy 2025-04-01 16:15:19 +13:00
Paul Gauthier
2762215d66 copy 2025-04-01 16:14:02 +13:00
Paul Gauthier
4e53797aac Merge branch 'main' of github.com:Aider-AI/aider 2025-04-01 16:13:21 +13:00
Paul Gauthier
b24ac4b3a2 pin to avoid yanked versions #3699 2025-04-01 16:13:13 +13:00
paul-gauthier
88ab6afd3e Merge pull request #3698 from aj47/patch-1
Update benchmark README.md to specify how to config other settings
2025-04-01 15:27:38 +13:00
Paul Gauthier
5c5db0a961 noop 2025-04-01 15:27:05 +13:00
AJ (@techfren)
587186d96c Update benchmark README.md to specify how to config other settings 2025-03-31 17:05:53 -07:00
Paul Gauthier
d9ddf93f83 copy 2025-04-01 08:37:08 +13:00
Paul Gauthier
d3882d3513 Merge branch 'main' of github.com:Aider-AI/aider 2025-04-01 08:28:00 +13:00
paul-gauthier
a458215bbb Merge pull request #3692 from claui/requests 2025-04-01 06:53:26 +13:00
Claudia Pellegrino
7ae0fa3775 chore: remove redundant code
1. The module already imports `requests`, so by the time this check is
   called, the module is already loaded.

2. Even if the code path were taken, it would fail anyway, because the
   `aider[oauth]` extra was hallucinated and does not exist.

3. Downstream distributions usually have managed Python environments,
   where pip cannot be used at all.
   That means distros must patch out every such pip invocation
   (example: [1]; full disclosure: I maintain this but other distros
   will eventually bump into the same issues). Restricting at-runtime
   pip usage to the minimum necessary is friendlier to distro
   maintainers.

[1]: https://aur.archlinux.org/cgit/aur.git/tree/archlinux-use-system.patch?h=aider-chat&id=7f8156946857215104bce151454ad0101ade4a48
2025-03-31 19:13:41 +02:00
Paul Gauthier
f1695f8b15 copy 2025-03-31 19:56:59 +13:00
Paul Gauthier
4c08bbb9e5 copy 2025-03-31 19:34:36 +13:00
Paul Gauthier
9b55ff8c4c copy 2025-03-31 19:32:36 +13:00
Paul Gauthier
2096d2b786 copy 2025-03-31 19:27:29 +13:00
Paul Gauthier
70196cd6fd copy 2025-03-31 16:24:13 +13:00
Paul Gauthier
c2cba97722 copy 2025-03-31 14:32:36 +13:00
Paul Gauthier
7534ebd145 blame 2025-03-31 14:28:44 +13:00
47 changed files with 2340 additions and 1200 deletions

View File

@@ -0,0 +1,86 @@
name: Check PyPI Version
# Check to be sure `pip install aider-chat` installs the most recently published version.
# If dependencies get yanked, it may render the latest version uninstallable.
# See https://github.com/Aider-AI/aider/issues/3699 for example.
on:
schedule:
# Run once a day at midnight UTC
- cron: '0 0 * * *'
workflow_dispatch: # Allows manual triggering
jobs:
check_version:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.9", "3.10", "3.11", "3.12"]
steps:
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install aider-chat
run: pip install aider-chat
- name: Get installed aider version
id: installed_version
run: |
set -x # Enable debugging output
aider_version_output=$(aider --version)
if [ $? -ne 0 ]; then
echo "Error: 'aider --version' command failed."
exit 1
fi
echo "Raw aider --version output: $aider_version_output"
# Extract version number (format X.Y.Z)
version_num=$(echo "$aider_version_output" | grep -oP '\d+\.\d+\.\d+')
# Check if grep found anything
if [ -z "$version_num" ]; then
echo "Error: Could not extract version number using grep -oP '\d+\.\d+\.\d+' from output: $aider_version_output"
exit 1
fi
echo "Extracted version number: $version_num"
echo "version=$version_num" >> $GITHUB_OUTPUT
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch all history for all tags
- name: Get latest tag
id: latest_tag
run: |
set -x # Enable debugging output
# Fetch all tags from remote just in case
git fetch --tags origin main
# Get the latest tag that strictly matches vX.Y.Z (no suffixes like .dev)
# List all tags, sort by version descending, filter for exact pattern, take the first one
latest_tag=$(git tag --sort=-v:refname | grep -P '^v\d+\.\d+\.\d+$' | head -n 1)
if [ -z "$latest_tag" ]; then
echo "Error: Could not find any tags matching the pattern '^v\d+\.\d+\.\d+$'"
exit 1
fi
echo "Latest non-dev tag: $latest_tag"
# Remove 'v' prefix for comparison
tag_num=${latest_tag#v}
echo "Extracted tag number: $tag_num"
echo "tag=$tag_num" >> $GITHUB_OUTPUT
- name: Compare versions
run: |
echo "Installed version: ${{ steps.installed_version.outputs.version }}"
echo "Latest tag version: ${{ steps.latest_tag.outputs.tag }}"
if [ "${{ steps.installed_version.outputs.version }}" != "${{ steps.latest_tag.outputs.tag }}" ]; then
echo "Error: Installed aider version (${{ steps.installed_version.outputs.version }}) does not match the latest tag (${{ steps.latest_tag.outputs.tag }})."
exit 1
fi
echo "Versions match."

View File

@@ -0,0 +1,90 @@
name: Windows Check PyPI Version
# Check to be sure `pip install aider-chat` installs the most recently published version on Windows.
# If dependencies get yanked, it may render the latest version uninstallable.
# See https://github.com/Aider-AI/aider/issues/3699 for example.
on:
schedule:
# Run once a day at 1 AM UTC (offset from Ubuntu check)
- cron: '0 1 * * *'
workflow_dispatch: # Allows manual triggering
jobs:
check_version:
runs-on: windows-latest
strategy:
matrix:
python-version: ["3.9", "3.10", "3.11", "3.12"]
defaults:
run:
shell: pwsh # Use PowerShell for all run steps
steps:
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install aider-chat
run: pip install aider-chat
- name: Get installed aider version
id: installed_version
run: |
Write-Host "Running 'aider --version'..."
$aider_version_output = aider --version
if ($LASTEXITCODE -ne 0) {
Write-Error "Error: 'aider --version' command failed."
exit 1
}
Write-Host "Raw aider --version output: $aider_version_output"
# Extract version number (format X.Y.Z) using PowerShell regex
$match = [regex]::Match($aider_version_output, '\d+\.\d+\.\d+')
if (-not $match.Success) {
Write-Error "Error: Could not extract version number using regex '\d+\.\d+\.\d+' from output: $aider_version_output"
exit 1
}
$version_num = $match.Value
Write-Host "Extracted version number: $version_num"
echo "version=$version_num" >> $env:GITHUB_OUTPUT
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch all history for all tags
- name: Get latest tag
id: latest_tag
run: |
Write-Host "Fetching tags..."
# Fetch all tags from remote just in case
git fetch --tags origin main
Write-Host "Getting latest non-dev tag..."
# Get the latest tag that strictly matches vX.Y.Z (no suffixes like .dev)
# List all tags, sort by version descending, filter for exact pattern, take the first one
$latest_tag = (git tag --sort=-v:refname | Select-String -Pattern '^v\d+\.\d+\.\d+$' | Select-Object -First 1).Line
if (-not $latest_tag) {
Write-Error "Error: Could not find any tags matching the pattern '^v\d+\.\d+\.\d+$'"
exit 1
}
Write-Host "Latest non-dev tag: $latest_tag"
# Remove 'v' prefix for comparison
$tag_num = $latest_tag.Substring(1)
Write-Host "Extracted tag number: $tag_num"
echo "tag=$tag_num" >> $env:GITHUB_OUTPUT
- name: Compare versions
run: |
Write-Host "Installed version: ${{ steps.installed_version.outputs.version }}"
Write-Host "Latest tag version: ${{ steps.latest_tag.outputs.tag }}"
if ("${{ steps.installed_version.outputs.version }}" -ne "${{ steps.latest_tag.outputs.tag }}") {
Write-Error "Error: Installed aider version (${{ steps.installed_version.outputs.version }}) does not match the latest tag (${{ steps.latest_tag.outputs.tag }})."
exit 1
}
Write-Host "Versions match."

View File

@@ -1,7 +1,60 @@
# Release history
### main branch
- Add the `openrouter/deepseek-chat-v3-0324:free` model.
- Commit messages generated by aider are no longer forced to be entirely lowercase, by Peter Hadlaw.
- Updated default settings for Grok models.
- Aider wrote 64% of the code in this release.
### Aider v0.81.2
- Add support for `xai/grok-3-beta`, `xai/grok-3-mini-beta`, `openrouter/x-ai/grok-3-beta`, `openrouter/x-ai/grok-3-mini-beta`, and `openrouter/openrouter/optimus-alpha` models.
- Add alias "grok3" for `xai/grok-3-beta`.
- Add alias "optimus" for `openrouter/openrouter/optimus-alpha`.
- Fix URL extraction from error messages.
- Allow adding files by full path even if a file with the same basename is already in the chat.
- Fix quoting of values containing '#' in the sample `aider.conf.yml`.
- Add support for Fireworks AI model 'deepseek-v3-0324', by Felix Lisczyk.
- Commit messages generated by aider are now lowercase, by Anton Ödman.
- Aider wrote 64% of the code in this release.
### Aider v0.81.1
- Added support for the `gemini/gemini-2.5-pro-preview-03-25` model.
- Updated the `gemini` alias to point to `gemini/gemini-2.5-pro-preview-03-25`.
- Added the `gemini-exp` alias for `gemini/gemini-2.5-pro-exp-03-25`.
- Aider wrote 87% of the code in this release.
### Aider v0.81.0
- Added support for the `openrouter/openrouter/quasar-alpha` model.
- Run with `aider --model quasar`
- Offer OpenRouter OAuth authentication if an OpenRouter model is specified but the API key is missing.
- Prevent retrying API calls when the provider reports insufficient credits.
- Improve URL detection to exclude trailing double quotes.
- Aider wrote 86% of the code in this release.
### Aider v0.80.4
- Bumped deps to pickup litellm change to properly display the root cause of OpenRouter "choices" errors.
### Aider v0.80.3
- Improve error message for OpenRouter API connection issues to mention potential rate limiting or upstream provider issues.
- Configure weak models (`gemini/gemini-2.0-flash` and `openrouter/google/gemini-2.0-flash-exp:free`) for Gemini 2.5 Pro models.
- Add model metadata for `openrouter/google/gemini-2.0-flash-exp:free`.
### Aider v0.80.2
- Bumped deps.
### Aider v0.80.1
- Updated deps for yanked fsspec and aiohttp packages #3699
- Removed redundant dependency check during OpenRouter OAuth flow, by Claudia Pellegrino.
### Aider v0.80.0
- OpenRouter OAuth integration:
- Offer to OAuth against OpenRouter if no model and keys are provided.
- Select OpenRouter default model based on free/paid tier status if `OPENROUTER_API_KEY` is set and no model is specified.
@@ -14,6 +67,7 @@
- Update edit format to the new model's default when switching models with `/model`, if the user was using the old model's default format.
- Add `Ctrl-X Ctrl-E` keybinding to edit the current input buffer in an external editor, by Matteo Landi.
- Fix linting errors for filepaths containing shell metacharacters, by Mir Adnan ALI.
- Add the `openrouter/deepseek-chat-v3-0324:free` model.
- Add repomap support for the Scala language, by Vasil Markoukin.
- Fixed bug in `/run` that was preventing auto-testing.
- Fix bug preventing `UnboundLocalError` during git tree traversal.

View File

@@ -27,13 +27,13 @@ cog.out(text)
<a href="https://github.com/Aider-AI/aider/stargazers"><img alt="GitHub Stars" title="Total number of GitHub stars the Aider project has received"
src="https://img.shields.io/github/stars/Aider-AI/aider?style=flat-square&logo=github&color=f1c40f&labelColor=555555"/></a>
<a href="https://pypi.org/project/aider-chat/"><img alt="PyPI Downloads" title="Total number of installations via pip from PyPI"
src="https://img.shields.io/badge/📦%20Installs-1.7M-2ecc71?style=flat-square&labelColor=555555"/></a>
src="https://img.shields.io/badge/📦%20Installs-1.9M-2ecc71?style=flat-square&labelColor=555555"/></a>
<img alt="Tokens per week" title="Number of tokens processed weekly by Aider users"
src="https://img.shields.io/badge/📈%20Tokens%2Fweek-15B-3498db?style=flat-square&labelColor=555555"/>
<a href="https://openrouter.ai/"><img alt="OpenRouter Ranking" title="Aider's ranking among applications on the OpenRouter platform"
<a href="https://openrouter.ai/#options-menu"><img alt="OpenRouter Ranking" title="Aider's ranking among applications on the OpenRouter platform"
src="https://img.shields.io/badge/🏆%20OpenRouter-Top%2020-9b59b6?style=flat-square&labelColor=555555"/></a>
<a href="https://aider.chat/HISTORY.html"><img alt="Singularity" title="Percentage of the new code in Aider's last release written by Aider itself"
src="https://img.shields.io/badge/🔄%20Singularity-65%25-e74c3c?style=flat-square&labelColor=555555"/></a>
src="https://img.shields.io/badge/🔄%20Singularity-86%25-e74c3c?style=flat-square&labelColor=555555"/></a>
<!--[[[end]]]-->
</p>

View File

@@ -1,6 +1,6 @@
from packaging import version
__version__ = "0.80.1.dev"
__version__ = "0.81.4.dev"
safe_version = __version__
try:

View File

@@ -143,7 +143,10 @@ class YamlHelpFormatter(argparse.HelpFormatter):
default = "true"
if default:
parts.append(f"#{switch}: {default}\n")
if "#" in default:
parts.append(f'#{switch}: "{default}"\n')
else:
parts.append(f"#{switch}: {default}\n")
elif action.nargs in ("*", "+") or isinstance(action, argparse._AppendAction):
parts.append(f"#{switch}: xxx")
parts.append("## Specify multiple values like this:")

View File

@@ -922,10 +922,11 @@ class Coder:
else:
self.io.tool_error(text)
url_pattern = re.compile(r"(https?://[^\s/$.?#].[^\s]*)")
# Exclude double quotes from the matched URL characters
url_pattern = re.compile(r'(https?://[^\s/$.?#].[^\s"]*)')
urls = list(set(url_pattern.findall(text))) # Use set to remove duplicates
for url in urls:
url = url.rstrip(".',\"")
url = url.rstrip(".',\"}") # Added } to the characters to strip
self.io.offer_url(url)
return urls
@@ -934,7 +935,8 @@ class Coder:
if not self.detect_urls:
return inp
url_pattern = re.compile(r"(https?://[^\s/$.?#].[^\s]*[^\s,.])")
# Exclude double quotes from the matched URL characters
url_pattern = re.compile(r'(https?://[^\s/$.?#].[^\s"]*[^\s,.])')
urls = list(set(url_pattern.findall(inp))) # Use set to remove duplicates
group = ConfirmGroup(urls)
for url in urls:
@@ -1624,10 +1626,6 @@ class Coder:
mentioned_rel_fnames = set()
fname_to_rel_fnames = {}
for rel_fname in addable_rel_fnames:
# Skip files that share a basename with files already in chat
if os.path.basename(rel_fname) in existing_basenames:
continue
normalized_rel_fname = rel_fname.replace("\\", "/")
normalized_words = set(word.replace("\\", "/") for word in words)
if normalized_rel_fname in normalized_words:
@@ -1642,6 +1640,10 @@ class Coder:
fname_to_rel_fnames[fname].append(rel_fname)
for fname, rel_fnames in fname_to_rel_fnames.items():
# If the basename is already in chat, don't add based on a basename mention
if fname in existing_basenames:
continue
# If the basename mention is unique among addable files and present in the text
if len(rel_fnames) == 1 and fname in words:
mentioned_rel_fnames.add(rel_fnames[0])

View File

@@ -19,7 +19,7 @@ class EditBlockFencedPrompts(EditBlockPrompts):
Here are the *SEARCH/REPLACE* blocks:
{fence[0]}
{fence[0]}python
mathweb/flask/app.py
<<<<<<< SEARCH
from flask import Flask
@@ -29,7 +29,7 @@ from flask import Flask
>>>>>>> REPLACE
{fence[1]}
{fence[0]}
{fence[0]}python
mathweb/flask/app.py
<<<<<<< SEARCH
def factorial(n):
@@ -44,7 +44,7 @@ def factorial(n):
>>>>>>> REPLACE
{fence[1]}
{fence[0]}
{fence[0]}python
mathweb/flask/app.py
<<<<<<< SEARCH
return str(factorial(n))
@@ -68,7 +68,7 @@ mathweb/flask/app.py
Here are the *SEARCH/REPLACE* blocks:
{fence[0]}
{fence[0]}python
hello.py
<<<<<<< SEARCH
=======
@@ -79,7 +79,7 @@ def hello():
>>>>>>> REPLACE
{fence[1]}
{fence[0]}
{fence[0]}python
main.py
<<<<<<< SEARCH
def hello():
@@ -93,3 +93,50 @@ from hello import hello
""",
),
]
system_reminder = """# *SEARCH/REPLACE block* Rules:
Every *SEARCH/REPLACE block* must use this format:
1. The opening fence and code language, eg: {fence[0]}python
2. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.
3. The start of search block: <<<<<<< SEARCH
4. A contiguous chunk of lines to search for in the existing source code
5. The dividing line: =======
6. The lines to replace into the source code
7. The end of the replace block: >>>>>>> REPLACE
8. The closing fence: {fence[1]}
Use the *FULL* file path, as shown to you by the user.
{quad_backtick_reminder}
Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.
If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.
*SEARCH/REPLACE* blocks will *only* replace the first matching occurrence.
Include multiple unique *SEARCH/REPLACE* blocks if needed.
Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.
Keep *SEARCH/REPLACE* blocks concise.
Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.
Include just the changing lines, and a few surrounding lines if needed for uniqueness.
Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.
Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!
To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.
Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.
If you want to put code in a new file, use a *SEARCH/REPLACE block* with:
- A new file path, including dir name if needed
- An empty `SEARCH` section
- The new file's contents in the `REPLACE` section
To rename files which have been added to the chat, use shell commands at the end of your response.
If the user just says something like "ok" or "go ahead" or "do that" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.
The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.
{lazy_prompt}
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
{shell_cmd_reminder}
"""

View File

@@ -85,6 +85,23 @@ class LiteLLMExceptions:
return ExInfo("APIConnectionError", False, "You need to: pip install boto3")
if "OpenrouterException" in str(ex) and "'choices'" in str(ex):
return ExInfo(
"APIConnectionError", True, "The OpenRouter API provider is down or overloaded."
"APIConnectionError",
True,
(
"OpenRouter or the upstream API provider is down, overloaded or rate"
" limiting your requests."
),
)
# Check for specific non-retryable APIError cases like insufficient credits
if ex.__class__ is litellm.APIError:
err_str = str(ex).lower()
if "insufficient credits" in err_str and '"code":402' in err_str:
return ExInfo(
"APIError",
False,
"Insufficient credits with the API provider. Please add credits.",
)
# Fall through to default APIError handling if not the specific credits error
return self.exceptions.get(ex.__class__, ExInfo(None, None, None))

View File

@@ -30,7 +30,7 @@ from aider.history import ChatSummary
from aider.io import InputOutput
from aider.llm import litellm # noqa: F401; properly init litellm on launch
from aider.models import ModelSettings
from aider.onboarding import select_default_model
from aider.onboarding import offer_openrouter_oauth, select_default_model
from aider.repo import ANY_GIT_ERROR, GitRepo
from aider.report import report_uncaught_exceptions
from aider.versioncheck import check_version, install_from_main_branch, install_upgrade
@@ -765,9 +765,48 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
selected_model_name = select_default_model(args, io, analytics)
if not selected_model_name:
# Error message and analytics event are handled within select_default_model
# It might have already offered OAuth if no model/keys were found.
# If it failed here, we exit.
return 1
args.model = selected_model_name # Update args with the selected model
# Check if an OpenRouter model was selected/specified but the key is missing
if args.model.startswith("openrouter/") and not os.environ.get("OPENROUTER_API_KEY"):
io.tool_warning(
f"The specified model '{args.model}' requires an OpenRouter API key, which was not"
" found."
)
# Attempt OAuth flow because the specific model needs it
if offer_openrouter_oauth(io, analytics):
# OAuth succeeded, the key should now be in os.environ.
# Check if the key is now present after the flow.
if os.environ.get("OPENROUTER_API_KEY"):
io.tool_output(
"OpenRouter successfully connected."
) # Inform user connection worked
else:
# This case should ideally not happen if offer_openrouter_oauth succeeded
# but check defensively.
io.tool_error(
"OpenRouter authentication seemed successful, but the key is still missing."
)
analytics.event(
"exit",
reason="OpenRouter key missing after successful OAuth for specified model",
)
return 1
else:
# OAuth failed or was declined by the user
io.tool_error(
f"Unable to proceed without an OpenRouter API key for model '{args.model}'."
)
io.offer_url(urls.models_and_keys, "Open documentation URL for more info?")
analytics.event(
"exit",
reason="OpenRouter key missing for specified model and OAuth failed/declined",
)
return 1
main_model = models.Model(
args.model,
weak_model=args.weak_model,

View File

@@ -88,10 +88,14 @@ MODEL_ALIASES = {
"3": "gpt-3.5-turbo",
# Other models
"deepseek": "deepseek/deepseek-chat",
"r1": "deepseek/deepseek-reasoner",
"flash": "gemini/gemini-2.0-flash-exp",
"quasar": "openrouter/openrouter/quasar-alpha",
"r1": "deepseek/deepseek-reasoner",
"gemini-2.5-pro": "gemini/gemini-2.5-pro-exp-03-25",
"gemini": "gemini/gemini-2.5-pro-exp-03-25",
"gemini": "gemini/gemini-2.5-pro-preview-03-25",
"gemini-exp": "gemini/gemini-2.5-pro-exp-03-25",
"grok3": "xai/grok-3-beta",
"optimus": "openrouter/openrouter/optimus-alpha",
}
# Model metadata loaded from resources and user's files.

View File

@@ -13,7 +13,6 @@ import requests
from aider import urls
from aider.io import InputOutput
from aider.utils import check_pip_install_extra
def check_openrouter_tier(api_key):
@@ -215,10 +214,6 @@ def exchange_code_for_key(code, code_verifier, io):
def start_openrouter_oauth_flow(io, analytics):
"""Initiates the OpenRouter OAuth PKCE flow using a local server."""
# Check for requests library
if not check_pip_install_extra(io, "requests", "OpenRouter OAuth", "aider[oauth]"):
return None
port = find_available_port()
if not port:
io.tool_error("Could not find an available port between 8484 and 8584.")

View File

@@ -15,7 +15,7 @@ Use these for <type>: fix, feat, build, chore, ci, docs, style, refactor, perf,
Ensure the commit message:
- Starts with the appropriate prefix.
- Is in the imperative mood (e.g., \"Add feature\" not \"Added feature\" or \"Adding feature\").
- Is in the imperative mood (e.g., \"add feature\" not \"added feature\" or \"adding feature\").
- Does not exceed 72 characters.
Reply only with the one-line commit message, without any additional text, explanations, \

View File

@@ -108,6 +108,15 @@
"output_cost_per_token": 0.0000009,
"mode": "chat",
},
"fireworks_ai/accounts/fireworks/models/deepseek-v3-0324": {
"max_tokens": 160000,
"max_input_tokens": 100000,
"max_output_tokens": 8192,
"litellm_provider": "fireworks_ai",
"input_cost_per_token": 0.0000009,
"output_cost_per_token": 0.0000009,
"mode": "chat",
},
"o3-mini": {
"max_tokens": 100000,
"max_input_tokens": 200000,
@@ -156,6 +165,26 @@
"supports_system_messages": true,
"supports_response_schema": true
},
"openrouter/openrouter/quasar-alpha": {
"max_input_tokens": 1000000,
"max_output_tokens": 32000,
"input_cost_per_token": 0.0,
"output_cost_per_token": 0.0,
"litellm_provider": "openrouter",
"mode": "chat",
"supports_vision": true,
"supports_function_calling": true,
"supports_system_messages": true,
"supports_prompt_caching": true
},
"openrouter/openrouter/optimus-alpha": {
"max_input_tokens": 1000000,
"max_output_tokens": 32000,
"input_cost_per_token": 0.0,
"output_cost_per_token": 0.0,
"litellm_provider": "openrouter",
"mode": "chat"
},
"openrouter/openai/gpt-4o-mini": {
"max_tokens": 16384,
"max_input_tokens": 128000,
@@ -305,6 +334,42 @@
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"gemini/gemini-2.5-pro-preview-03-25": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
"max_output_tokens": 64000,
"max_images_per_prompt": 3000,
"max_videos_per_prompt": 10,
"max_video_length": 1,
"max_audio_length_hours": 8.4,
"max_audio_per_prompt": 1,
"max_pdf_size_mb": 30,
"input_cost_per_image": 0,
"input_cost_per_video_per_second": 0,
"input_cost_per_audio_per_second": 0,
"input_cost_per_token": 0.00000125,
"input_cost_per_character": 0,
"input_cost_per_token_above_128k_tokens": 0,
"input_cost_per_character_above_128k_tokens": 0,
"input_cost_per_image_above_128k_tokens": 0,
"input_cost_per_video_per_second_above_128k_tokens": 0,
"input_cost_per_audio_per_second_above_128k_tokens": 0,
"output_cost_per_token": 0.000010,
"output_cost_per_character": 0,
"output_cost_per_token_above_128k_tokens": 0,
"output_cost_per_character_above_128k_tokens": 0,
"litellm_provider": "gemini",
"mode": "chat",
"supports_system_messages": true,
"supports_function_calling": true,
"supports_vision": true,
"supports_audio_input": true,
"supports_video_input": true,
"supports_pdf_input": true,
"supports_response_schema": true,
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"vertex_ai/gemini-2.5-pro-exp-03-25": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
@@ -341,6 +406,78 @@
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"vertex_ai/gemini-2.5-pro-preview-03-25": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
"max_output_tokens": 64000,
"max_images_per_prompt": 3000,
"max_videos_per_prompt": 10,
"max_video_length": 1,
"max_audio_length_hours": 8.4,
"max_audio_per_prompt": 1,
"max_pdf_size_mb": 30,
"input_cost_per_image": 0,
"input_cost_per_video_per_second": 0,
"input_cost_per_audio_per_second": 0,
"input_cost_per_token": 0.00000125,
"input_cost_per_character": 0,
"input_cost_per_token_above_128k_tokens": 0,
"input_cost_per_character_above_128k_tokens": 0,
"input_cost_per_image_above_128k_tokens": 0,
"input_cost_per_video_per_second_above_128k_tokens": 0,
"input_cost_per_audio_per_second_above_128k_tokens": 0,
"output_cost_per_token": 0.000010,
"output_cost_per_character": 0,
"output_cost_per_token_above_128k_tokens": 0,
"output_cost_per_character_above_128k_tokens": 0,
"litellm_provider": "vertex_ai-language-models",
"mode": "chat",
"supports_system_messages": true,
"supports_function_calling": true,
"supports_vision": true,
"supports_audio_input": true,
"supports_video_input": true,
"supports_pdf_input": true,
"supports_response_schema": true,
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"openrouter/google/gemini-2.5-pro-preview-03-25": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
"max_output_tokens": 64000,
"max_images_per_prompt": 3000,
"max_videos_per_prompt": 10,
"max_video_length": 1,
"max_audio_length_hours": 8.4,
"max_audio_per_prompt": 1,
"max_pdf_size_mb": 30,
"input_cost_per_image": 0,
"input_cost_per_video_per_second": 0,
"input_cost_per_audio_per_second": 0,
"input_cost_per_token": 0.00000125,
"input_cost_per_character": 0,
"input_cost_per_token_above_128k_tokens": 0,
"input_cost_per_character_above_128k_tokens": 0,
"input_cost_per_image_above_128k_tokens": 0,
"input_cost_per_video_per_second_above_128k_tokens": 0,
"input_cost_per_audio_per_second_above_128k_tokens": 0,
"output_cost_per_token": 0.000010,
"output_cost_per_character": 0,
"output_cost_per_token_above_128k_tokens": 0,
"output_cost_per_character_above_128k_tokens": 0,
"litellm_provider": "vertex_ai-language-models",
"mode": "chat",
"supports_system_messages": true,
"supports_function_calling": true,
"supports_vision": true,
"supports_audio_input": true,
"supports_video_input": true,
"supports_pdf_input": true,
"supports_response_schema": true,
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"openrouter/google/gemini-2.5-pro-exp-03-25:free": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
@@ -355,9 +492,9 @@
"input_cost_per_video_per_second": 0,
"input_cost_per_audio_per_second": 0,
"input_cost_per_token": 0,
"input_cost_per_character": 0,
"input_cost_per_token_above_128k_tokens": 0,
"input_cost_per_character_above_128k_tokens": 0,
"input_cost_per_character": 0,
"input_cost_per_token_above_128k_tokens": 0,
"input_cost_per_character_above_128k_tokens": 0,
"input_cost_per_image_above_128k_tokens": 0,
"input_cost_per_video_per_second_above_128k_tokens": 0,
"input_cost_per_audio_per_second_above_128k_tokens": 0,
@@ -377,4 +514,59 @@
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"openrouter/x-ai/grok-3-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
"max_output_tokens": 131072,
"input_cost_per_token": 0.000003,
"output_cost_per_token": 0.000015,
"litellm_provider": "openrouter",
"mode": "chat"
},
"xai/grok-3-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
"max_output_tokens": 131072,
"input_cost_per_token": 0.000003,
"output_cost_per_token": 0.000015,
"litellm_provider": "xai",
"mode": "chat"
},
"openrouter/x-ai/grok-3-mini-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
"max_output_tokens": 131072,
"input_cost_per_token": 0.0000003,
"output_cost_per_token": 0.0000005,
"litellm_provider": "openrouter",
"mode": "chat"
},
"xai/grok-3-mini-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
"max_output_tokens": 131072,
"input_cost_per_token": 0.0000003,
"output_cost_per_token": 0.0000005,
"litellm_provider": "xai",
"mode": "chat"
},
"openrouter/google/gemini-2.0-flash-exp:free": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
"max_output_tokens": 8192,
"max_images_per_prompt": 3000,
"max_videos_per_prompt": 10,
"max_video_length": 1,
"max_audio_length_hours": 8.4,
"max_audio_per_prompt": 1,
"max_pdf_size_mb": 30,
"litellm_provider": "openrouter",
"mode": "chat",
"supports_system_messages": true,
"supports_function_calling": true,
"supports_vision": true,
"supports_response_schema": true,
"supports_audio_output": true,
"supports_tool_choice": true
},
}

View File

@@ -589,8 +589,6 @@
weak_model_name: openrouter/deepseek/deepseek-chat-v3-0324:free
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 131072
caches_by_default: true
use_temperature: false
editor_model_name: openrouter/deepseek/deepseek-chat-v3-0324:free
@@ -819,7 +817,7 @@
use_temperature: false
editor_model_name: openrouter/deepseek/deepseek-chat
editor_edit_format: editor-diff
- name: fireworks_ai/accounts/fireworks/models/deepseek-r1
edit_format: diff
weak_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
@@ -840,6 +838,14 @@
extra_params:
max_tokens: 128000
- name: fireworks_ai/accounts/fireworks/models/deepseek-v3-0324
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 160000
- name: openai/o3-mini
edit_format: diff
weak_model_name: gpt-4o-mini
@@ -849,7 +855,7 @@
editor_edit_format: editor-diff
system_prompt_prefix: "Formatting re-enabled. "
accepts_settings: ["reasoning_effort"]
- name: o3-mini
edit_format: diff
weak_model_name: gpt-4o-mini
@@ -899,7 +905,7 @@
examples_as_sys_msg: true
editor_model_name: gpt-4o
editor_edit_format: editor-diff
- name: openai/gpt-4.5-preview
edit_format: diff
weak_model_name: gpt-4o-mini
@@ -944,21 +950,73 @@
- name: gemini/gemma-3-27b-it
use_system_prompt: false
- name: openrouter/google/gemma-3-27b-it:free
use_system_prompt: false
- name: openrouter/google/gemma-3-27b-it
use_system_prompt: false
- name: gemini/gemini-2.5-pro-preview-03-25
edit_format: diff-fenced
use_repo_map: true
weak_model_name: gemini/gemini-2.0-flash
- name: gemini/gemini-2.5-pro-exp-03-25
edit_format: diff-fenced
use_repo_map: true
weak_model_name: gemini/gemini-2.0-flash
- name: openrouter/google/gemini-2.5-pro-exp-03-25:free
edit_format: diff-fenced
use_repo_map: true
weak_model_name: openrouter/google/gemini-2.0-flash-exp:free
- name: vertex_ai/gemini-2.5-pro-exp-03-25
edit_format: diff-fenced
use_repo_map: true
# Need metadata for this one...
#weak_model_name: vertex_ai/gemini-2.0-flash
- name: vertex_ai/gemini-2.5-pro-preview-03-25
edit_format: diff-fenced
use_repo_map: true
# Need metadata for this one...
#weak_model_name: vertex_ai/gemini-2.0-flash
- name: openrouter/openrouter/quasar-alpha
use_repo_map: true
edit_format: diff
examples_as_sys_msg: true
- name: openrouter/x-ai/grok-3-beta
use_repo_map: true
edit_format: diff
- name: xai/grok-3-beta
use_repo_map: true
edit_format: diff
- name: openrouter/x-ai/grok-3-mini-beta
use_repo_map: true
edit_format: whole
accepts_settings:
- reasoning_effort
#extra_params:
# extra_body:
# reasoning_effort: high
- name: xai/grok-3-mini-beta
use_repo_map: true
edit_format: whole
accepts_settings:
- reasoning_effort
#extra_params:
# extra_body:
# reasoning_effort: low
- name: openrouter/openrouter/optimus-alpha
use_repo_map: true
edit_format: diff
examples_as_sys_msg: true

View File

@@ -25,7 +25,60 @@ cog.out(text)
### main branch
- Add the `openrouter/deepseek-chat-v3-0324:free` model.
- Commit messages generated by aider are no longer forced to be entirely lowercase, by Peter Hadlaw.
- Updated default settings for Grok models.
- Aider wrote 64% of the code in this release.
### Aider v0.81.2
- Add support for `xai/grok-3-beta`, `xai/grok-3-mini-beta`, `openrouter/x-ai/grok-3-beta`, `openrouter/x-ai/grok-3-mini-beta`, and `openrouter/openrouter/optimus-alpha` models.
- Add alias "grok3" for `xai/grok-3-beta`.
- Add alias "optimus" for `openrouter/openrouter/optimus-alpha`.
- Fix URL extraction from error messages.
- Allow adding files by full path even if a file with the same basename is already in the chat.
- Fix quoting of values containing '#' in the sample `aider.conf.yml`.
- Add support for Fireworks AI model 'deepseek-v3-0324', by Felix Lisczyk.
- Commit messages generated by aider are now lowercase, by Anton Ödman.
- Aider wrote 64% of the code in this release.
### Aider v0.81.1
- Added support for the `gemini/gemini-2.5-pro-preview-03-25` model.
- Updated the `gemini` alias to point to `gemini/gemini-2.5-pro-preview-03-25`.
- Added the `gemini-exp` alias for `gemini/gemini-2.5-pro-exp-03-25`.
- Aider wrote 87% of the code in this release.
### Aider v0.81.0
- Added support for the `openrouter/openrouter/quasar-alpha` model.
- Run with `aider --model quasar`
- Offer OpenRouter OAuth authentication if an OpenRouter model is specified but the API key is missing.
- Prevent retrying API calls when the provider reports insufficient credits.
- Improve URL detection to exclude trailing double quotes.
- Aider wrote 86% of the code in this release.
### Aider v0.80.4
- Bumped deps to pickup litellm change to properly display the root cause of OpenRouter "choices" errors.
### Aider v0.80.3
- Improve error message for OpenRouter API connection issues to mention potential rate limiting or upstream provider issues.
- Configure weak models (`gemini/gemini-2.0-flash` and `openrouter/google/gemini-2.0-flash-exp:free`) for Gemini 2.5 Pro models.
- Add model metadata for `openrouter/google/gemini-2.0-flash-exp:free`.
### Aider v0.80.2
- Bumped deps.
### Aider v0.80.1
- Updated deps for yanked fsspec and aiohttp packages #3699
- Removed redundant dependency check during OpenRouter OAuth flow, by Claudia Pellegrino.
### Aider v0.80.0
- OpenRouter OAuth integration:
- Offer to OAuth against OpenRouter if no model and keys are provided.
- Select OpenRouter default model based on free/paid tier status if `OPENROUTER_API_KEY` is set and no model is specified.
@@ -38,6 +91,7 @@ cog.out(text)
- Update edit format to the new model's default when switching models with `/model`, if the user was using the old model's default format.
- Add `Ctrl-X Ctrl-E` keybinding to edit the current input buffer in an external editor, by Matteo Landi.
- Fix linting errors for filepaths containing shell metacharacters, by Mir Adnan ALI.
- Add the `openrouter/deepseek-chat-v3-0324:free` model.
- Add repomap support for the Scala language, by Vasil Markoukin.
- Fixed bug in `/run` that was preventing auto-testing.
- Fix bug preventing `UnboundLocalError` during git tree traversal.

View File

@@ -4319,3 +4319,132 @@
Paul Gauthier (aider): 221
start_tag: v0.78.0
total_lines: 338
- aider_percentage: 86.86
aider_total: 1837
end_date: '2025-03-31'
end_tag: v0.80.0
file_counts:
aider/__init__.py:
Paul Gauthier: 1
aider/coders/base_coder.py:
Paul Gauthier: 2
aider/commands.py:
Paul Gauthier: 4
Paul Gauthier (aider): 20
aider/exceptions.py:
Paul Gauthier: 1
Paul Gauthier (aider): 3
aider/io.py:
Andrey Ivanov: 2
Matteo Landi (aider): 11
Paul Gauthier (aider): 38
aider/linter.py:
Mir Adnan ALI: 2
aider/main.py:
Paul Gauthier: 1
Paul Gauthier (aider): 21
aider/mdstream.py:
Peter Schilling (aider) (aider): 25
aider/models.py:
Paul Gauthier: 12
Paul Gauthier (aider): 9
aider/onboarding.py:
Paul Gauthier: 44
Paul Gauthier (aider): 389
aider/queries/tree-sitter-languages/scala-tags.scm:
Vasil Markoukin: 65
aider/repo.py:
Paul Gauthier: 1
Paul Gauthier (aider): 7
aider/repomap.py:
Paul Gauthier (aider): 19
aider/resources/model-settings.yml:
Paul Gauthier (aider): 13
aider/scrape.py:
Paul Gauthier: 1
Paul Gauthier (aider): 1
aider/utils.py:
Paul Gauthier (aider): 5
aider/watch.py:
Matteo Landi (aider): 2
aider/website/_includes/leaderboard.js:
Paul Gauthier: 1
Paul Gauthier (aider): 2
aider/website/docs/leaderboards/index.md:
Paul Gauthier: 1
aider/website/index.html:
Paul Gauthier: 51
Paul Gauthier (aider): 175
scripts/30k-image.py:
Paul Gauthier: 8
Paul Gauthier (aider): 227
scripts/homepage.py:
Paul Gauthier (aider): 122
tests/basic/test_commands.py:
Paul Gauthier: 2
Paul Gauthier (aider): 48
tests/basic/test_exceptions.py:
Paul Gauthier (aider): 17
tests/basic/test_io.py:
Paul Gauthier (aider): 28
tests/basic/test_main.py:
Paul Gauthier: 15
Paul Gauthier (aider): 199
tests/basic/test_onboarding.py:
Paul Gauthier (aider): 439
tests/basic/test_repomap.py:
Vasil Markoukin: 3
tests/basic/test_ssl_verification.py:
Paul Gauthier (aider): 8
tests/basic/test_watch.py:
Matteo Landi (aider): 9
tests/fixtures/languages/scala/test.scala:
Vasil Markoukin: 61
grand_total:
Andrey Ivanov: 2
Matteo Landi (aider): 22
Mir Adnan ALI: 2
Paul Gauthier: 145
Paul Gauthier (aider): 1790
Peter Schilling (aider) (aider): 25
Vasil Markoukin: 129
start_tag: v0.79.0
total_lines: 2115
- aider_percentage: 85.55
aider_total: 225
end_date: '2025-04-04'
end_tag: v0.81.0
file_counts:
.github/workflows/check_pypi_version.yml:
Paul Gauthier: 11
Paul Gauthier (aider): 75
.github/workflows/windows_check_pypi_version.yml:
Paul Gauthier: 4
Paul Gauthier (aider): 86
aider/__init__.py:
Paul Gauthier: 1
aider/coders/base_coder.py:
Paul Gauthier (aider): 4
aider/exceptions.py:
Paul Gauthier: 6
Paul Gauthier (aider): 12
aider/main.py:
Paul Gauthier (aider): 40
aider/models.py:
Paul Gauthier (aider): 2
aider/resources/model-settings.yml:
Paul Gauthier: 9
Paul Gauthier (aider): 1
aider/website/_includes/leaderboard.js:
Paul Gauthier (aider): 5
aider/website/docs/leaderboards/index.md:
Paul Gauthier: 1
aider/website/index.html:
Paul Gauthier: 3
tests/basic/test_exceptions.py:
Paul Gauthier: 3
grand_total:
Paul Gauthier: 38
Paul Gauthier (aider): 225
start_tag: v0.80.0
total_lines: 263

View File

@@ -807,31 +807,31 @@
seconds_per_case: 290.0
total_cost: 1.1164
- dirname: 2025-03-25-19-46-45--gemini-25-pro-exp-diff-fenced
- dirname: 2025-04-12-04-55-50--gemini-25-pro-diff-fenced
test_cases: 225
model: Gemini 2.5 Pro exp-03-25
model: Gemini 2.5 Pro Preview 03-25
edit_format: diff-fenced
commit_hash: 33413ec
pass_rate_1: 39.1
commit_hash: 0282574
pass_rate_1: 40.9
pass_rate_2: 72.9
pass_num_1: 88
pass_num_1: 92
pass_num_2: 164
percent_cases_well_formed: 89.8
error_outputs: 30
num_malformed_responses: 30
num_with_malformed_responses: 23
user_asks: 57
percent_cases_well_formed: 92.4
error_outputs: 21
num_malformed_responses: 21
num_with_malformed_responses: 17
user_asks: 69
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
test_timeouts: 2
total_tests: 225
command: aider --model gemini/gemini-2.5-pro-exp-03-25
date: 2025-03-25
versions: 0.78.1.dev
seconds_per_case: 47.1
total_cost: 0.0000
command: aider --model gemini/gemini-2.5-pro-preview-03-25
date: 2025-04-12
versions: 0.81.3.dev
seconds_per_case: 45.3
total_cost: 6.3174
- dirname: 2025-03-29-05-24-55--chatgpt4o-mar28-diff
test_cases: 225
@@ -857,4 +857,160 @@
date: 2025-03-29
versions: 0.79.3.dev
seconds_per_case: 10.3
total_cost: 19.7416
total_cost: 19.7416
- dirname: 2025-04-04-02-57-25--qalpha-diff-exsys
test_cases: 225
model: Quasar Alpha
edit_format: diff
commit_hash: 8a34a6c-dirty
pass_rate_1: 21.8
pass_rate_2: 54.7
pass_num_1: 49
pass_num_2: 123
percent_cases_well_formed: 98.2
error_outputs: 4
num_malformed_responses: 4
num_with_malformed_responses: 4
user_asks: 187
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 4
total_tests: 225
command: aider --model openrouter/openrouter/quasar-alpha
date: 2025-04-04
versions: 0.80.5.dev
seconds_per_case: 14.8
total_cost: 0.0000
- dirname: 2025-04-06-08-39-52--llama-4-maverick-17b-128e-instruct-polyglot-whole
test_cases: 225
model: Llama 4 Maverick
edit_format: whole
commit_hash: 9445a31
pass_rate_1: 4.4
pass_rate_2: 15.6
pass_num_1: 10
pass_num_2: 35
percent_cases_well_formed: 99.1
error_outputs: 12
num_malformed_responses: 2
num_with_malformed_responses: 2
user_asks: 248
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 4
total_tests: 225
command: aider --model nvidia_nim/meta/llama-4-maverick-17b-128e-instruct
date: 2025-04-06
versions: 0.81.2.dev
seconds_per_case: 20.5
total_cost: 0.0000
- dirname: 2025-04-10-04-21-31--grok3-diff-exuser
test_cases: 225
model: Grok 3 Beta
edit_format: diff
commit_hash: 2dd40fc-dirty
pass_rate_1: 22.2
pass_rate_2: 53.3
pass_num_1: 50
pass_num_2: 120
percent_cases_well_formed: 99.6
error_outputs: 1
num_malformed_responses: 1
num_with_malformed_responses: 1
user_asks: 68
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
total_tests: 225
command: aider --model openrouter/x-ai/grok-3-beta
date: 2025-04-10
versions: 0.81.2.dev
seconds_per_case: 15.3
total_cost: 11.0338
- dirname: 2025-04-10-18-47-24--grok3-mini-whole-exuser
test_cases: 225
model: Grok 3 Mini Beta (low)
edit_format: whole
commit_hash: 14ffe77-dirty
pass_rate_1: 11.1
pass_rate_2: 34.7
pass_num_1: 25
pass_num_2: 78
percent_cases_well_formed: 100.0
error_outputs: 3
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 73
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 5
total_tests: 225
command: aider --model openrouter/x-ai/grok-3-mini-beta
date: 2025-04-10
versions: 0.81.2.dev
seconds_per_case: 35.1
total_cost: 0.7856
- dirname: 2025-04-10-23-59-02--xai-grok3-mini-whole-high
test_cases: 225
model: Grok 3 Mini Beta (high)
edit_format: whole
commit_hash: 8ee33da-dirty
pass_rate_1: 17.3
pass_rate_2: 49.3
pass_num_1: 39
pass_num_2: 111
percent_cases_well_formed: 99.6
error_outputs: 1
num_malformed_responses: 1
num_with_malformed_responses: 1
user_asks: 64
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 0
total_tests: 225
command: aider --model xai/grok-3-mini-beta --reasoning-effort high
date: 2025-04-10
versions: 0.81.3.dev
seconds_per_case: 79.1
total_cost: 0.7346
- dirname: 2025-04-10-19-02-44--oalpha-diff-exsys
test_cases: 225
model: Optimus Alpha
edit_format: diff
commit_hash: 532bc45-dirty
pass_rate_1: 21.3
pass_rate_2: 52.9
pass_num_1: 48
pass_num_2: 119
percent_cases_well_formed: 97.3
error_outputs: 7
num_malformed_responses: 6
num_with_malformed_responses: 6
user_asks: 182
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
total_tests: 225
command: aider --model openrouter/openrouter/optimus-alpha
date: 2025-04-10
versions: 0.81.2.dev
seconds_per_case: 18.4
total_cost: 0.0000

View File

@@ -4,7 +4,11 @@ document.addEventListener('DOMContentLoaded', function () {
const redDiagonalPattern = pattern.draw('diagonal', 'rgba(255, 99, 132, 0.2)');
let displayedData = [];
const HIGHLIGHT_MODEL = '{{ highlight_model | default: "no no no" }}';
// Get highlight model from query string or Jekyll variable
const urlParams = new URLSearchParams(window.location.search);
const queryHighlight = urlParams.get('highlight');
const HIGHLIGHT_MODEL = queryHighlight || '{{ highlight_model | default: "no no no" }}';
var leaderboardData = {
labels: [],
datasets: [{
@@ -13,14 +17,14 @@ document.addEventListener('DOMContentLoaded', function () {
backgroundColor: function(context) {
const row = allData[context.dataIndex];
if (row && row.edit_format === 'whole') {
return diagonalPattern;
return redDiagonalPattern; // Use red pattern for highlighted whole format
}
const label = leaderboardData.labels[context.dataIndex] || '';
return (label && label.includes(HIGHLIGHT_MODEL)) ? 'rgba(255, 99, 132, 0.2)' : 'rgba(54, 162, 235, 0.2)';
return (label && HIGHLIGHT_MODEL && label.toLowerCase().includes(HIGHLIGHT_MODEL.toLowerCase())) ? 'rgba(255, 99, 132, 0.2)' : 'rgba(54, 162, 235, 0.2)';
},
borderColor: function(context) {
const label = context.chart.data.labels[context.dataIndex] || '';
return (label && label.includes(HIGHLIGHT_MODEL)) ? 'rgba(255, 99, 132, 1)' : 'rgba(54, 162, 235, 1)';
return (label && HIGHLIGHT_MODEL && label.toLowerCase().includes(HIGHLIGHT_MODEL.toLowerCase())) ? 'rgba(255, 99, 132, 1)' : 'rgba(54, 162, 235, 1)';
},
borderWidth: 1
}, {
@@ -74,11 +78,13 @@ document.addEventListener('DOMContentLoaded', function () {
leaderboardChart.render();
}
// Use displayedData in the backgroundColor callback instead of allData
// Update backgroundColor and borderColor for the main dataset based on displayedData
leaderboardData.datasets[0].backgroundColor = function(context) {
const row = displayedData[context.dataIndex];
const label = leaderboardData.labels[context.dataIndex] || '';
if (label && label.includes(HIGHLIGHT_MODEL)) {
const isHighlighted = label && HIGHLIGHT_MODEL && label.toLowerCase().includes(HIGHLIGHT_MODEL.toLowerCase());
if (isHighlighted) {
if (row && row.edit_format === 'whole') return redDiagonalPattern;
else return 'rgba(255, 99, 132, 0.2)';
} else if (row && row.edit_format === 'whole') {

Binary file not shown.

After

Width:  |  Height:  |  Size: 260 KiB

File diff suppressed because it is too large Load Diff

View File

@@ -171,19 +171,19 @@
#stream: true
## Set the color for user input (default: #00cc00)
#user-input-color: #00cc00
#user-input-color: "#00cc00"
## Set the color for tool output (default: None)
#tool-output-color: "xxx"
## Set the color for tool error messages (default: #FF2222)
#tool-error-color: #FF2222
#tool-error-color: "#FF2222"
## Set the color for tool warning messages (default: #FFA500)
#tool-warning-color: #FFA500
#tool-warning-color: "#FFA500"
## Set the color for assistant output (default: #0088ff)
#assistant-output-color: #0088ff
#assistant-output-color: "#0088ff"
## Set the color for the completion menu (default: terminal's default text color)
#completion-menu-color: "xxx"

View File

@@ -569,6 +569,14 @@ cog.out("```\n")
extra_params:
max_tokens: 128000
- name: fireworks_ai/accounts/fireworks/models/deepseek-v3-0324
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 160000
- name: fireworks_ai/accounts/fireworks/models/qwq-32b
edit_format: diff
weak_model_name: fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct
@@ -612,6 +620,12 @@ cog.out("```\n")
- name: gemini/gemini-2.5-pro-exp-03-25
edit_format: diff-fenced
weak_model_name: gemini/gemini-2.0-flash
use_repo_map: true
- name: gemini/gemini-2.5-pro-preview-03-25
edit_format: diff-fenced
weak_model_name: gemini/gemini-2.0-flash
use_repo_map: true
- name: gemini/gemini-exp-1114
@@ -962,8 +976,6 @@ cog.out("```\n")
weak_model_name: openrouter/deepseek/deepseek-chat-v3-0324:free
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 131072
caches_by_default: true
use_temperature: false
editor_model_name: openrouter/deepseek/deepseek-r1:free
@@ -1022,6 +1034,7 @@ cog.out("```\n")
- name: openrouter/google/gemini-2.5-pro-exp-03-25:free
edit_format: diff-fenced
weak_model_name: openrouter/google/gemini-2.0-flash-exp:free
use_repo_map: true
- name: openrouter/google/gemma-3-27b-it
@@ -1097,6 +1110,16 @@ cog.out("```\n")
accepts_settings:
- reasoning_effort
- name: openrouter/openrouter/optimus-alpha
edit_format: diff
use_repo_map: true
examples_as_sys_msg: true
- name: openrouter/openrouter/quasar-alpha
edit_format: diff
use_repo_map: true
examples_as_sys_msg: true
- name: openrouter/qwen/qwen-2.5-coder-32b-instruct
edit_format: diff
weak_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
@@ -1104,6 +1127,15 @@ cog.out("```\n")
editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
editor_edit_format: editor-diff
- name: openrouter/x-ai/grok-3-beta
edit_format: diff
use_repo_map: true
- name: openrouter/x-ai/grok-3-mini-beta
use_repo_map: true
accepts_settings:
- reasoning_effort
- name: vertex_ai-anthropic_models/vertex_ai/claude-3-7-sonnet@20250219
edit_format: diff
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
@@ -1169,9 +1201,22 @@ cog.out("```\n")
edit_format: diff-fenced
use_repo_map: true
- name: vertex_ai/gemini-2.5-pro-preview-03-25
edit_format: diff-fenced
use_repo_map: true
- name: vertex_ai/gemini-pro-experimental
edit_format: diff-fenced
use_repo_map: true
- name: xai/grok-3-beta
edit_format: diff
use_repo_map: true
- name: xai/grok-3-mini-beta
use_repo_map: true
accepts_settings:
- reasoning_effort
```
<!--[[[end]]]-->

View File

@@ -225,19 +225,19 @@ cog.outl("```")
#stream: true
## Set the color for user input (default: #00cc00)
#user-input-color: #00cc00
#user-input-color: "#00cc00"
## Set the color for tool output (default: None)
#tool-output-color: "xxx"
## Set the color for tool error messages (default: #FF2222)
#tool-error-color: #FF2222
#tool-error-color: "#FF2222"
## Set the color for tool warning messages (default: #FFA500)
#tool-warning-color: #FFA500
#tool-warning-color: "#FFA500"
## Set the color for assistant output (default: #0088ff)
#assistant-output-color: #0088ff
#assistant-output-color: "#0088ff"
## Set the color for the completion menu (default: terminal's default text color)
#completion-menu-color: "xxx"

View File

@@ -80,10 +80,14 @@ for alias, model in sorted(MODEL_ALIASES.items()):
- `4o`: gpt-4o
- `deepseek`: deepseek/deepseek-chat
- `flash`: gemini/gemini-2.0-flash-exp
- `gemini`: gemini/gemini-2.5-pro-exp-03-25
- `gemini`: gemini/gemini-2.5-pro-preview-03-25
- `gemini-2.5-pro`: gemini/gemini-2.5-pro-exp-03-25
- `gemini-exp`: gemini/gemini-2.5-pro-exp-03-25
- `grok3`: xai/grok-3-beta
- `haiku`: claude-3-5-haiku-20241022
- `optimus`: openrouter/openrouter/optimus-alpha
- `opus`: claude-3-opus-20240229
- `quasar`: openrouter/openrouter/quasar-alpha
- `r1`: deepseek/deepseek-reasoner
- `sonnet`: anthropic/claude-3-7-sonnet-20250219
<!--[[[end]]]-->

View File

@@ -264,9 +264,18 @@ tr:hover { background-color: #f5f5f5; }
</style>
<table>
<tr><th>Model Name</th><th class='right'>Total Tokens</th><th class='right'>Percent</th></tr>
<tr><td>gemini/gemini-2.5-pro-exp-03-25</td><td class='right'>1,729,168</td><td class='right'>77.1%</td></tr>
<tr><td>anthropic/claude-3-7-sonnet-20250219</td><td class='right'>514,309</td><td class='right'>22.9%</td></tr>
<tr><td>gemini/gemini-2.5-pro-exp-03-25</td><td class='right'>1,119,621</td><td class='right'>77.4%</td></tr>
<tr><td>gemini/gemini-2.5-pro-preview-03-25</td><td class='right'>269,898</td><td class='right'>18.6%</td></tr>
<tr><td>openrouter/anthropic/claude-3.7-sonnet</td><td class='right'>18,140</td><td class='right'>1.3%</td></tr>
<tr><td>o3-mini</td><td class='right'>17,296</td><td class='right'>1.2%</td></tr>
<tr><td>openrouter/x-ai/grok-3-mini-beta</td><td class='right'>16,987</td><td class='right'>1.2%</td></tr>
<tr><td>openrouter/REDACTED</td><td class='right'>4,099</td><td class='right'>0.3%</td></tr>
<tr><td>xai/grok-3-mini-beta</td><td class='right'>1,224</td><td class='right'>0.1%</td></tr>
</table>
{: .note :}
Some models show as REDACTED, because they are new or unpopular models.
Aider's analytics only records the names of "well known" LLMs.
<!--[[[end]]]-->
## How are the "aider wrote xx% of code" stats computed?

View File

@@ -36,17 +36,16 @@ If you can find and share that file in a
[GitHub issue](https://github.com/Aider-AI/aider/issues),
then it may be possible to add repo map support.
If aider doesn't support linting, it will be complicated to
add linting and repo map support.
That is because aider relies on
[py-tree-sitter-languages](https://github.com/grantjenks/py-tree-sitter-languages)
If aider doesn't already support linting your language,
it will be more complicated to add support.
Aider relies on
[tree-sitter-language-pack](https://github.com/Goldziher/tree-sitter-language-pack)
to provide pre-packaged versions of tree-sitter
parsers for many languages.
Aider needs to be easy for users to install in many environments,
and it is probably too complex to add dependencies on
additional individual tree-sitter parsers.
language parsers.
This makes it easy for users to install aider in many diverse environments.
You will probably need to work with that project to get your language
included; once it is packaged there, aider can easily lint that language.
For repo-map support, you will also need to find or create a `tags.scm` file.
<!--[[[cog
from aider.repomap import get_supported_languages_md

View File

@@ -128,6 +128,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
March 31, 2025.
April 12, 2025.
<!--[[[end]]]-->
</p>

View File

@@ -124,6 +124,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
March 31, 2025.
April 12, 2025.
<!--[[[end]]]-->
</p>

View File

@@ -73,6 +73,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
March 31, 2025.
April 12, 2025.
<!--[[[end]]]-->
</p>

View File

@@ -98,7 +98,7 @@ if result.returncode == 0:
date = datetime.datetime.fromtimestamp(timestamp)
cog.out(f"{date.strftime('%B %d, %Y.')}")
]]]-->
March 31, 2025.
April 12, 2025.
<!--[[[end]]]-->
</p>

View File

@@ -12,16 +12,16 @@ python -m pip install -U aider-chat
# Mac/Linux:
export AZURE_API_KEY=<key>
export AZURE_API_VERSION=2023-05-15
export AZURE_API_VERSION=2024-12-01-preview
export AZURE_API_BASE=https://myendpt.openai.azure.com
# Windows
setx AZURE_API_KEY <key>
setx AZURE_API_VERSION 2023-05-15
setx AZURE_API_VERSION 2024-12-01-preview
setx AZURE_API_BASE https://myendpt.openai.azure.com
# ... restart your shell after setx commands
aider --model azure/<your_deployment_name>
aider --model azure/<your_model_deployment_name>
# List models available from Azure
aider --list-models azure/
@@ -29,3 +29,9 @@ aider --list-models azure/
Note that aider will also use environment variables
like `AZURE_OPENAI_API_xxx`.
The `aider --list-models azure/` command will list all models that aider supports through Azure, not the models that are available for the provided endpoint.
When setting the model to use with `--model azure/<your_model_deployment_name>`, `<your_model_deployment_name>` is likely just the name of the model you have deployed to the endpoint, for example `o3-mini` or `gpt-4o`. The screenshot below shows `o3-mini` and `gpt-4o` deployments in the Azure portal done under the `myendpt` resource.
![example azure deployment](/assets/azure-deployment.png)

View File

@@ -71,6 +71,7 @@ cog.out(model_list)
- claude-3-sonnet-20240229
- codestral/codestral-2405
- codestral/codestral-latest
- databricks/databricks-claude-3-7-sonnet
- deepseek/deepseek-chat
- deepseek/deepseek-coder
- deepseek/deepseek-reasoner

View File

@@ -5,7 +5,27 @@ nav_order: 28
# Models and API keys
You need to tell aider which LLM to use and provide an API key.
Aider needs to know which LLM model you would like to work with and which keys
to provide when accessing it via API.
## Defaults
If you don't explicitly name a model, aider will try to select a model
for you to work with.
First, aider will check which
[keys you have provided via the environment, config files, or command line arguments](https://aider.chat/docs/config/api-keys.html).
Based on the available keys, aider will select the best model to use.
If you have not provided any keys, aider will offer to help you connect to
[OpenRouter](https://openrouter.ai)
which provides both free and paid access to most popular LLMs.
Once connected, aider will select the best model available on OpenRouter
based on whether you have a free or paid account there.
## Specifying model & key
You can also tell aider which LLM to use and provide an API key.
The easiest way is to use the `--model` and `--api-key`
command line arguments, like this:

View File

@@ -9,9 +9,9 @@ description: Using the code, architect, ask and help chat modes.
Aider has a few different chat modes:
- `code` - Aider will make changes to your code to satisfy your requests.
- `architect` - Aider will first propose a solution, then ask if you want it to turn that proposal into edits to your files.
- `ask` - Aider will answer questions about your code, but never edit it.
- `help` - Aider will answer questions about using aider, configuring, troubleshooting, etc.
- `ask` - Aider will discuss your code and answer questions about it, but never make changes.
- `architect` - Like code mode, aider will change your files. An architect model will propose changes and an editor model will translate that proposal into specific file edits.
- `help` - Aider will answer questions about aider: usage, configuration, troubleshooting, etc.
By default, aider starts in "code" mode. As you are talking, you can
send individual messages in a specific mode using
@@ -40,31 +40,104 @@ Or you can switch between coding modes using these commands without arguments:
Or you can launch aider in one of the modes with the `--chat-mode <mode>` switch.
There is also a special shortcut `--architect` to launch in `--chat-mode architect`.
The aider prompt will indicate the active mode:
```
> This is code mode.
ask> This is ask mode.
architect> This is architect mode.
```
## Ask/code workflow
A recommended workflow is to bounce back and forth between `/ask` and `/code` modes.
Use ask mode to discuss what you want to do, get suggestions or options from aider
and provide feedback on the approach.
Once aider understands the mission, switch to code mode to have it start editing
your files.
All the conversation and decision making from ask mode will
help ensure that the correct code changes are performed.
You can be very terse when you finally switch from ask to code mode.
Saying something as simple as "go ahead" in code mode will
have aider execute on the plan you've been discussing.
Here's an example with two ask mode messages to agree on the plan,
followed by two terse code mode messages to edit the code.
````
─────────────────────────────────────────────────────────────────────────────────────
Aider v0.79.0
Model: gemini/gemini-2.5-pro-exp-03-25 with diff-fenced edit format
> /ask What's the best thing to print if we're making a quick little demo program?
It sounds like you'd like to write a "Hello, world!" program. That's a very
popular phrase to use for small demo programs.
> /ask Yes, that's the idea. What are some more creative options?
1. "Greetings, Earthlings!"
2. "Beep boop, I am a computer!"
3. "To code, or not to code, that is the question."
4. "I think, therefore I print."
5. "01001000 01101001!" (Binary for "Hi!")
> do 4
hello.py
<<<<<<< SEARCH
=======
def main():
print("I think, therefore I print.")
>>>>>>> REPLACE
> ALL CAPS!
hello.py
<<<<<<< SEARCH
print("I think, therefore I print.")
=======
print("I THINK, THEREFORE I PRINT!")
>>>>>>> REPLACE
````
You can think of this ask/code workflow as a more fluid version of
architect mode, but working just with one model the whole time.
## Architect mode and the editor model
When you are in architect mode, aider sends your request to two models:
When you are in architect mode, aider sends your requests to two models:
1. First, it sends your request to the main active model.
The main model is configured with `/model`, `--model` or the shortcut switches like `--sonnet`.
After the main model replies, aider will offer to edit the files based on the response.
1. First, it sends your request to the main model which will act as an architect
to propose how to solve your coding request.
The main model is configured with `/model` or `--model`.
2. To edit the files, aider sends a second LLM request asking for specific code editing instructions.
This request goes to the "editor" model.
2. Aider then sends another request to an "editor model",
asking it to turn the architect's proposal into specific file editing instructions.
Aider has built in defaults to select an editor model based on your main model.
Or, you can choose an editor model yourself with `--editor-model <model>`.
Or, you can choose a specific editor model with `--editor-model <model>`.
Architect mode produces better results than code mode, but uses two LLM requests.
This probably makes it slower and more expensive than using code mode.
Certain LLMs aren't able to propose coding solutions *and*
specify detailed file edits all in one go.
For these models, architect mode can produce better results than code mode
by pairing them
with an editor model that is responsible for generating the file editing instructions.
But this uses two LLM requests,
which can take longer and increase costs.
Architect mode is especially useful with OpenAI's o1 models, which are strong at
reasoning but less capable at editing files.
Pairing an o1 architect with an editor model like GPT-4o or Sonnet will
give the best results.
But architect mode is also quite helpful when you use GPT-4o or Sonnet
at both the architect and the editor.
But architect mode can also be helpful when you use the same model
as both the architect and the editor.
Allowing the model two requests to solve the problem and edit the files
usually provides a better result.
can sometimes provide better results.
The editor model uses one of aider's edit formats to let the LLM
edit source files.
@@ -91,9 +164,9 @@ for more details.
#### /ask What is this repo?
This is the source code to the popular django package.
This is a collection of Python functions that compute various math functions.
#### /help How do I use ollama?
#### /help How do I use aider with ollama?
Run `aider --model ollama/<ollama-model>`.
See these docs for more info: https://aider.chat/docs/llms/ollama.html
@@ -122,8 +195,6 @@ builtin.
This way you don't have to maintain a custom factorial implementation,
and the builtin function is well optimized.
> Edit the files? (Y)es/(N)o [Yes]: Yes
```python
<<<<<<< SEARCH
def factorial(n):

View File

@@ -69,23 +69,23 @@ cog.out(text)
]]]-->
<a href="https://github.com/Aider-AI/aider" class="github-badge badge-stars" title="Total number of GitHub stars the Aider project has received">
<span class="badge-label">⭐ GitHub Stars</span>
<span class="badge-value">30K</span>
<span class="badge-value">31K</span>
</a>
<a href="https://pypi.org/project/aider-chat/" class="github-badge badge-installs" title="Total number of installations via pip from PyPI">
<span class="badge-label">📦 Installs</span>
<span class="badge-value">1.7M</span>
<span class="badge-value">1.9M</span>
</a>
<div class="github-badge badge-tokens" title="Number of tokens processed weekly by Aider users">
<span class="badge-label">📈 Tokens/week</span>
<span class="badge-value">15B</span>
</div>
<a href="https://openrouter.ai/" class="github-badge badge-router" title="Aider's ranking among applications on the OpenRouter platform">
<a href="https://openrouter.ai/#options-menu" class="github-badge badge-router" title="Aider's ranking among applications on the OpenRouter platform">
<span class="badge-label">🏆 OpenRouter</span>
<span class="badge-value">Top 20</span>
</a>
<a href="/HISTORY.html" class="github-badge badge-coded" title="Percentage of the new code in Aider's last release written by Aider itself">
<span class="badge-label">🔄 Singularity</span>
<span class="badge-value">65%</span>
<span class="badge-value">86%</span>
</a>
<!--[[[end]]]-->
</div>

View File

@@ -82,6 +82,7 @@ You can run `./benchmark/benchmark.py --help` for a list of all the arguments, b
- `--threads` specifies how many exercises to benchmark in parallel. Start with a single thread if you are working out the kinks on your benchmarking setup or working with a new model, etc. Once you are getting reliable results, you can speed up the process by running with more threads. 10 works well against the OpenAI APIs.
- `--num-tests` specifies how many of the tests to run before stopping. This is another way to start gently as you debug your benchmarking setup.
- `--keywords` filters the tests to run to only the ones whose name match the supplied argument (similar to `pytest -k xxxx`).
- `--read-model-settings=<filename.yml>` specify model settings, see here: https://aider.chat/docs/config/adv-model-settings.html#model-settings
### Benchmark report

View File

@@ -4,7 +4,7 @@ aiohappyeyeballs==2.6.1
# via
# -c requirements/common-constraints.txt
# aiohttp
aiohttp==3.11.14
aiohttp==3.11.16
# via
# -c requirements/common-constraints.txt
# litellm
@@ -86,7 +86,7 @@ frozenlist==1.5.0
# -c requirements/common-constraints.txt
# aiohttp
# aiosignal
fsspec==2025.3.1
fsspec==2025.3.2
# via
# -c requirements/common-constraints.txt
# huggingface-hub
@@ -106,7 +106,7 @@ h11==0.14.0
# via
# -c requirements/common-constraints.txt
# httpcore
httpcore==1.0.7
httpcore==1.0.8
# via
# -c requirements/common-constraints.txt
# httpx
@@ -115,7 +115,7 @@ httpx==0.28.1
# -c requirements/common-constraints.txt
# litellm
# openai
huggingface-hub==0.29.3
huggingface-hub==0.30.2
# via
# -c requirements/common-constraints.txt
# tokenizers
@@ -143,7 +143,7 @@ jiter==0.9.0
# via
# -c requirements/common-constraints.txt
# openai
json5==0.10.0
json5==0.12.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
@@ -156,7 +156,7 @@ jsonschema-specifications==2024.10.1
# via
# -c requirements/common-constraints.txt
# jsonschema
litellm==1.65.0
litellm==1.65.7
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
@@ -184,7 +184,7 @@ monotonic==1.6
# via
# -c requirements/common-constraints.txt
# posthog
multidict==6.2.0
multidict==6.4.3
# via
# -c requirements/common-constraints.txt
# aiohttp
@@ -198,7 +198,7 @@ numpy==1.26.4
# -c requirements/common-constraints.txt
# scipy
# soundfile
openai==1.69.0
openai==1.73.0
# via
# -c requirements/common-constraints.txt
# litellm
@@ -224,7 +224,7 @@ pip==25.0.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
posthog==3.23.0
posthog==3.24.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
@@ -253,12 +253,12 @@ pycparser==2.22
# via
# -c requirements/common-constraints.txt
# cffi
pydantic==2.11.1
pydantic==2.11.3
# via
# -c requirements/common-constraints.txt
# litellm
# openai
pydantic-core==2.33.0
pydantic-core==2.33.1
# via
# -c requirements/common-constraints.txt
# pydantic
@@ -266,7 +266,7 @@ pydub==0.25.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
pyflakes==3.3.1
pyflakes==3.3.2
# via
# -c requirements/common-constraints.txt
# flake8
@@ -379,7 +379,7 @@ tree-sitter-embedded-template==0.23.2
# via
# -c requirements/common-constraints.txt
# tree-sitter-language-pack
tree-sitter-language-pack==0.6.1
tree-sitter-language-pack==0.7.1
# via
# -c requirements/common-constraints.txt
# grep-ast
@@ -387,7 +387,7 @@ tree-sitter-yaml==0.7.0
# via
# -c requirements/common-constraints.txt
# tree-sitter-language-pack
typing-extensions==4.13.0
typing-extensions==4.13.2
# via
# -c requirements/common-constraints.txt
# anyio
@@ -402,12 +402,12 @@ typing-inspection==0.4.0
# via
# -c requirements/common-constraints.txt
# pydantic
urllib3==2.3.0
urllib3==2.4.0
# via
# -c requirements/common-constraints.txt
# mixpanel
# requests
watchfiles==1.0.4
watchfiles==1.0.5
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
@@ -415,7 +415,7 @@ wcwidth==0.2.13
# via
# -c requirements/common-constraints.txt
# prompt-toolkit
yarl==1.18.3
yarl==1.19.0
# via
# -c requirements/common-constraints.txt
# aiohttp

View File

@@ -2,7 +2,7 @@
# uv pip compile --no-strip-extras --output-file=requirements/common-constraints.txt requirements/requirements.in requirements/requirements-browser.in requirements/requirements-dev.in requirements/requirements-help.in requirements/requirements-playwright.in
aiohappyeyeballs==2.6.1
# via aiohttp
aiohttp==3.11.14
aiohttp==3.11.16
# via
# huggingface-hub
# litellm
@@ -27,7 +27,7 @@ backoff==2.2.1
# via
# -r requirements/requirements.in
# posthog
banks==2.1.0
banks==2.1.1
# via llama-index-core
beautifulsoup4==4.13.3
# via -r requirements/requirements.in
@@ -103,13 +103,13 @@ filetype==1.2.0
# via llama-index-core
flake8==7.2.0
# via -r requirements/requirements.in
fonttools==4.56.0
fonttools==4.57.0
# via matplotlib
frozenlist==1.5.0
# via
# aiohttp
# aiosignal
fsspec==2025.3.1
fsspec==2025.3.2
# via
# huggingface-hub
# llama-index-core
@@ -147,7 +147,7 @@ greenlet==3.1.1
# sqlalchemy
grep-ast==0.8.1
# via -r requirements/requirements.in
griffe==1.7.1
griffe==1.7.2
# via banks
grpcio==1.71.0
# via
@@ -157,14 +157,14 @@ grpcio-status==1.71.0
# via google-api-core
h11==0.14.0
# via httpcore
httpcore==1.0.7
httpcore==1.0.8
# via httpx
httpx==0.28.1
# via
# litellm
# llama-index-core
# openai
huggingface-hub[inference]==0.29.3
huggingface-hub[inference]==0.30.2
# via
# llama-index-embeddings-huggingface
# sentence-transformers
@@ -201,7 +201,7 @@ joblib==1.4.2
# via
# nltk
# scikit-learn
json5==0.10.0
json5==0.12.0
# via -r requirements/requirements.in
jsonschema==4.23.0
# via
@@ -212,13 +212,13 @@ jsonschema-specifications==2024.10.1
# via jsonschema
kiwisolver==1.4.8
# via matplotlib
litellm==1.65.0
litellm==1.65.7
# via -r requirements/requirements.in
llama-index-core==0.12.26
# via
# -r requirements/requirements-help.in
# llama-index-embeddings-huggingface
llama-index-embeddings-huggingface==0.5.2
llama-index-embeddings-huggingface==0.5.3
# via -r requirements/requirements-help.in
lox==0.13.0
# via -r requirements/requirements-dev.in
@@ -240,7 +240,7 @@ monotonic==1.6
# via posthog
mpmath==1.3.0
# via sympy
multidict==6.2.0
multidict==6.4.3
# via
# aiohttp
# yarl
@@ -248,7 +248,7 @@ multiprocess==0.70.17
# via pathos
mypy-extensions==1.0.0
# via typing-inspect
narwhals==1.32.0
narwhals==1.34.1
# via altair
nest-asyncio==1.6.0
# via llama-index-core
@@ -274,7 +274,7 @@ numpy==1.26.4
# soundfile
# streamlit
# transformers
openai==1.69.0
openai==1.73.0
# via litellm
packaging==24.2
# via
@@ -314,12 +314,14 @@ pip==25.0.1
pip-tools==7.4.1
# via -r requirements/requirements-dev.in
platformdirs==4.3.7
# via virtualenv
# via
# banks
# virtualenv
playwright==1.51.0
# via -r requirements/requirements-playwright.in
pluggy==1.5.0
# via pytest
posthog==3.23.0
posthog==3.24.1
# via -r requirements/requirements.in
pox==0.3.5
# via pathos
@@ -358,13 +360,13 @@ pycodestyle==2.13.0
# via flake8
pycparser==2.22
# via cffi
pydantic==2.11.1
pydantic==2.11.3
# via
# banks
# litellm
# llama-index-core
# openai
pydantic-core==2.33.0
pydantic-core==2.33.1
# via pydantic
pydeck==0.9.1
# via streamlit
@@ -372,7 +374,7 @@ pydub==0.25.1
# via -r requirements/requirements.in
pyee==12.1.1
# via playwright
pyflakes==3.3.1
pyflakes==3.3.2
# via flake8
pygments==2.19.1
# via rich
@@ -450,7 +452,7 @@ scipy==1.13.1
# sentence-transformers
semver==3.0.4
# via -r requirements/requirements-dev.in
sentence-transformers==4.0.1
sentence-transformers==4.0.2
# via llama-index-embeddings-huggingface
setuptools==78.1.0
# via pip-tools
@@ -477,11 +479,11 @@ soupsieve==2.6
# via beautifulsoup4
sqlalchemy[asyncio]==2.0.40
# via llama-index-core
streamlit==1.44.0
streamlit==1.44.1
# via -r requirements/requirements-browser.in
sympy==1.13.3
# via torch
tenacity==9.0.0
tenacity==9.1.2
# via
# llama-index-core
# streamlit
@@ -511,7 +513,7 @@ tqdm==4.67.1
# openai
# sentence-transformers
# transformers
transformers==4.50.3
transformers==4.51.2
# via sentence-transformers
tree-sitter==0.24.0
# via tree-sitter-language-pack
@@ -519,13 +521,13 @@ tree-sitter-c-sharp==0.23.1
# via tree-sitter-language-pack
tree-sitter-embedded-template==0.23.2
# via tree-sitter-language-pack
tree-sitter-language-pack==0.6.1
tree-sitter-language-pack==0.7.1
# via grep-ast
tree-sitter-yaml==0.7.0
# via tree-sitter-language-pack
typer==0.15.2
# via -r requirements/requirements-dev.in
typing-extensions==4.13.0
typing-extensions==4.13.2
# via
# altair
# anyio
@@ -552,15 +554,15 @@ typing-inspection==0.4.0
# via pydantic
tzdata==2025.2
# via pandas
urllib3==2.3.0
urllib3==2.4.0
# via
# mixpanel
# requests
uv==0.6.11
uv==0.6.14
# via -r requirements/requirements-dev.in
virtualenv==20.29.3
virtualenv==20.30.0
# via pre-commit
watchfiles==1.0.4
watchfiles==1.0.5
# via -r requirements/requirements.in
wcwidth==0.2.13
# via prompt-toolkit
@@ -570,7 +572,7 @@ wrapt==1.17.2
# via
# deprecated
# llama-index-core
yarl==1.18.3
yarl==1.19.0
# via aiohttp
zipp==3.21.0
# via importlib-metadata

View File

@@ -58,7 +58,7 @@ markupsafe==3.0.2
# via
# -c requirements/common-constraints.txt
# jinja2
narwhals==1.32.0
narwhals==1.34.1
# via
# -c requirements/common-constraints.txt
# altair
@@ -123,11 +123,11 @@ smmap==5.0.2
# via
# -c requirements/common-constraints.txt
# gitdb
streamlit==1.44.0
streamlit==1.44.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-browser.in
tenacity==9.0.0
tenacity==9.1.2
# via
# -c requirements/common-constraints.txt
# streamlit
@@ -139,7 +139,7 @@ tornado==6.4.2
# via
# -c requirements/common-constraints.txt
# streamlit
typing-extensions==4.13.0
typing-extensions==4.13.2
# via
# -c requirements/common-constraints.txt
# altair
@@ -149,7 +149,7 @@ tzdata==2025.2
# via
# -c requirements/common-constraints.txt
# pandas
urllib3==2.3.0
urllib3==2.4.0
# via
# -c requirements/common-constraints.txt
# requests

View File

@@ -54,7 +54,7 @@ filelock==3.18.0
# via
# -c requirements/common-constraints.txt
# virtualenv
fonttools==4.56.0
fonttools==4.57.0
# via
# -c requirements/common-constraints.txt
# matplotlib
@@ -285,7 +285,7 @@ typer==0.15.2
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
typing-extensions==4.13.0
typing-extensions==4.13.2
# via
# -c requirements/common-constraints.txt
# typer
@@ -293,15 +293,15 @@ tzdata==2025.2
# via
# -c requirements/common-constraints.txt
# pandas
urllib3==2.3.0
urllib3==2.4.0
# via
# -c requirements/common-constraints.txt
# requests
uv==0.6.11
uv==0.6.14
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
virtualenv==20.29.3
virtualenv==20.30.0
# via
# -c requirements/common-constraints.txt
# pre-commit

View File

@@ -4,7 +4,7 @@ aiohappyeyeballs==2.6.1
# via
# -c requirements/common-constraints.txt
# aiohttp
aiohttp==3.11.14
aiohttp==3.11.16
# via
# -c requirements/common-constraints.txt
# huggingface-hub
@@ -25,7 +25,7 @@ attrs==25.3.0
# via
# -c requirements/common-constraints.txt
# aiohttp
banks==2.1.0
banks==2.1.1
# via
# -c requirements/common-constraints.txt
# llama-index-core
@@ -75,7 +75,7 @@ frozenlist==1.5.0
# -c requirements/common-constraints.txt
# aiohttp
# aiosignal
fsspec==2025.3.1
fsspec==2025.3.2
# via
# -c requirements/common-constraints.txt
# huggingface-hub
@@ -85,7 +85,7 @@ greenlet==3.1.1
# via
# -c requirements/common-constraints.txt
# sqlalchemy
griffe==1.7.1
griffe==1.7.2
# via
# -c requirements/common-constraints.txt
# banks
@@ -93,7 +93,7 @@ h11==0.14.0
# via
# -c requirements/common-constraints.txt
# httpcore
httpcore==1.0.7
httpcore==1.0.8
# via
# -c requirements/common-constraints.txt
# httpx
@@ -101,7 +101,7 @@ httpx==0.28.1
# via
# -c requirements/common-constraints.txt
# llama-index-core
huggingface-hub[inference]==0.29.3
huggingface-hub[inference]==0.30.2
# via
# -c requirements/common-constraints.txt
# llama-index-embeddings-huggingface
@@ -130,7 +130,7 @@ llama-index-core==0.12.26
# -c requirements/common-constraints.txt
# -r requirements/requirements-help.in
# llama-index-embeddings-huggingface
llama-index-embeddings-huggingface==0.5.2
llama-index-embeddings-huggingface==0.5.3
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-help.in
@@ -146,7 +146,7 @@ mpmath==1.3.0
# via
# -c requirements/common-constraints.txt
# sympy
multidict==6.2.0
multidict==6.4.3
# via
# -c requirements/common-constraints.txt
# aiohttp
@@ -187,17 +187,21 @@ pillow==11.1.0
# -c requirements/common-constraints.txt
# llama-index-core
# sentence-transformers
platformdirs==4.3.7
# via
# -c requirements/common-constraints.txt
# banks
propcache==0.3.1
# via
# -c requirements/common-constraints.txt
# aiohttp
# yarl
pydantic==2.11.1
pydantic==2.11.3
# via
# -c requirements/common-constraints.txt
# banks
# llama-index-core
pydantic-core==2.33.0
pydantic-core==2.33.1
# via
# -c requirements/common-constraints.txt
# pydantic
@@ -233,7 +237,7 @@ scipy==1.13.1
# -c requirements/common-constraints.txt
# scikit-learn
# sentence-transformers
sentence-transformers==4.0.1
sentence-transformers==4.0.2
# via
# -c requirements/common-constraints.txt
# llama-index-embeddings-huggingface
@@ -249,7 +253,7 @@ sympy==1.13.3
# via
# -c requirements/common-constraints.txt
# torch
tenacity==9.0.0
tenacity==9.1.2
# via
# -c requirements/common-constraints.txt
# llama-index-core
@@ -278,11 +282,11 @@ tqdm==4.67.1
# nltk
# sentence-transformers
# transformers
transformers==4.50.3
transformers==4.51.2
# via
# -c requirements/common-constraints.txt
# sentence-transformers
typing-extensions==4.13.0
typing-extensions==4.13.2
# via
# -c requirements/common-constraints.txt
# anyio
@@ -304,7 +308,7 @@ typing-inspection==0.4.0
# via
# -c requirements/common-constraints.txt
# pydantic
urllib3==2.3.0
urllib3==2.4.0
# via
# -c requirements/common-constraints.txt
# requests
@@ -313,7 +317,7 @@ wrapt==1.17.2
# -c requirements/common-constraints.txt
# deprecated
# llama-index-core
yarl==1.18.3
yarl==1.19.0
# via
# -c requirements/common-constraints.txt
# aiohttp

View File

@@ -12,7 +12,7 @@ pyee==12.1.1
# via
# -c requirements/common-constraints.txt
# playwright
typing-extensions==4.13.0
typing-extensions==4.13.2
# via
# -c requirements/common-constraints.txt
# pyee

View File

@@ -288,7 +288,7 @@ src="https://img.shields.io/github/stars/Aider-AI/aider?style=flat-square&logo=g
src="https://img.shields.io/badge/📦%20Installs-{downloads_formatted}-2ecc71?style=flat-square&labelColor=555555"/></a>
<img alt="Tokens per week" title="{TOKENS_WEEKLY_TOOLTIP}"
src="https://img.shields.io/badge/📈%20Tokens%2Fweek-{TOKENS_PER_WEEK}-3498db?style=flat-square&labelColor=555555"/>
<a href="https://openrouter.ai/"><img alt="OpenRouter Ranking" title="{OPENROUTER_TOOLTIP}"
<a href="https://openrouter.ai/#options-menu"><img alt="OpenRouter Ranking" title="{OPENROUTER_TOOLTIP}"
src="https://img.shields.io/badge/🏆%20OpenRouter-Top%2020-9b59b6?style=flat-square&labelColor=555555"/></a>
<a href="https://aider.chat/HISTORY.html"><img alt="Singularity" title="{SINGULARITY_TOOLTIP}"
src="https://img.shields.io/badge/🔄%20Singularity-{aider_percent_rounded}%25-e74c3c?style=flat-square&labelColor=555555"/></a>""" # noqa
@@ -398,7 +398,7 @@ def get_badges_html():
<span class="badge-label">📈 Tokens/week</span>
<span class="badge-value">{TOKENS_PER_WEEK}</span>
</div>
<a href="https://openrouter.ai/" class="github-badge badge-router" title="{OPENROUTER_TOOLTIP}">
<a href="https://openrouter.ai/#options-menu" class="github-badge badge-router" title="{OPENROUTER_TOOLTIP}">
<span class="badge-label">🏆 OpenRouter</span>
<span class="badge-value">Top 20</span>
</a>

View File

@@ -81,15 +81,20 @@ def main():
parser.add_argument(
"--dry-run", action="store_true", help="Print each step without actually executing them"
)
parser.add_argument("--force", action="store_true", help="Skip pre-push checks")
args = parser.parse_args()
dry_run = args.dry_run
force = args.force
# Perform checks before proceeding
check_branch()
check_working_directory_clean()
check_main_branch_up_to_date()
check_ok_to_push()
# Perform checks before proceeding unless --force is used
if not force:
check_branch()
check_working_directory_clean()
check_main_branch_up_to_date()
check_ok_to_push()
else:
print("Skipping pre-push checks due to --force flag.")
new_version_str = args.new_version
if not re.match(r"^\d+\.\d+\.\d+$", new_version_str):

View File

@@ -194,8 +194,8 @@ class TestCoder(unittest.TestCase):
mock.return_value = set([str(fname1), str(fname2), str(fname3)])
coder.repo.get_tracked_files = mock
# Check that file mentions skip files with duplicate basenames
mentioned = coder.get_file_mentions(f"Check {fname2} and {fname3}")
# Check that file mentions of a pure basename skips files with duplicate basenames
mentioned = coder.get_file_mentions(f"Check {fname2.name} and {fname3}")
self.assertEqual(mentioned, {str(fname3)})
# Add a read-only file with same basename
@@ -366,6 +366,45 @@ class TestCoder(unittest.TestCase):
f"Failed to extract mentions from: {content}",
)
def test_get_file_mentions_multiline_backticks(self):
with GitTemporaryDirectory():
io = InputOutput(pretty=False, yes=True)
coder = Coder.create(self.GPT35, None, io)
# Create test files
test_files = [
"swebench/harness/test_spec/python.py",
"swebench/harness/test_spec/javascript.py",
]
for fname in test_files:
fpath = Path(fname)
fpath.parent.mkdir(parents=True, exist_ok=True)
fpath.touch()
# Mock get_addable_relative_files to return our test files
coder.get_addable_relative_files = MagicMock(return_value=set(test_files))
# Input text with multiline backticked filenames
content = """
Could you please **add the following files to the chat**?
1. `swebench/harness/test_spec/python.py`
2. `swebench/harness/test_spec/javascript.py`
Once I have these, I can show you precisely how to do the thing.
"""
expected_mentions = {
"swebench/harness/test_spec/python.py",
"swebench/harness/test_spec/javascript.py",
}
mentioned_files = coder.get_file_mentions(content)
self.assertEqual(
mentioned_files,
expected_mentions,
f"Failed to extract mentions from multiline backticked content: {content}",
)
def test_get_file_mentions_path_formats(self):
with GitTemporaryDirectory():
io = InputOutput(pretty=False, yes=True)

View File

@@ -79,4 +79,6 @@ def test_openrouter_error():
ex_info = ex.get_ex_info(openrouter_error)
assert ex_info.retry is True
assert "OpenRouter API provider is down" in ex_info.description
assert "OpenRouter" in ex_info.description
assert "overloaded" in ex_info.description
assert "rate" in ex_info.description