Compare commits

...

337 Commits

Author SHA1 Message Date
Paul Gauthier
9518193d0a set version to 0.81.4.dev 2025-04-12 13:25:55 -07:00
Paul Gauthier
60a2b799e6 version bump to 0.81.3 2025-04-12 13:25:52 -07:00
Paul Gauthier
9d7dc00f25 copy 2025-04-12 08:58:50 -07:00
Paul Gauthier
882e7b6716 bump deps 2025-04-12 08:58:09 -07:00
Paul Gauthier
8ba29ee8e6 copy 2025-04-12 08:57:18 -07:00
Paul Gauthier
3f67c41759 copy 2025-04-12 08:09:46 -07:00
Paul Gauthier
7fbeafa1cf copy 2025-04-12 08:06:39 -07:00
Paul Gauthier
028257480b copy 2025-04-11 14:26:04 +12:00
Paul Gauthier
e42a0c45b6 Merge branch 'main' of github.com:Aider-AI/aider 2025-04-11 14:18:07 +12:00
Paul Gauthier
1e7f8549ff add grok3mini high 2025-04-11 14:11:39 +12:00
paul-gauthier
668de71f9d Merge pull request #3776 from peterhadlaw/master
Do not lowercase the _entirety_ of the commit message
2025-04-11 14:01:03 +12:00
Paul Gauthier
067245b810 chore: update grok model settings to remove/comment out params 2025-04-11 13:41:11 +12:00
Peter Hadlaw
8f236c69e1 fix: Do not lowercase the entirety of the commit message 2025-04-10 20:08:40 -05:00
Paul Gauthier
8ee33da114 copy 2025-04-11 09:08:06 +12:00
Paul Gauthier (aider)
2fedc2e699 feat: update openrouter badges to link to options menu 2025-04-11 08:38:22 +12:00
Paul Gauthier
1961543e2f set version to 0.81.3.dev 2025-04-11 08:37:59 +12:00
Paul Gauthier
b4f65734a5 version bump to 0.81.2 2025-04-11 08:37:56 +12:00
Paul Gauthier (aider)
0eb80553f6 style: Apply linter to versionbump.py 2025-04-11 08:37:37 +12:00
Paul Gauthier (aider)
110c63ae95 feat: Add --force flag to skip pre-push checks 2025-04-11 08:37:33 +12:00
Paul Gauthier
57304536bf copy 2025-04-11 08:23:16 +12:00
Paul Gauthier
a9ca5da139 feat: add grok-3-mini-beta to model settings with reasoning_effort 2025-04-11 08:22:44 +12:00
Paul Gauthier
947aebfbe0 copy 2025-04-11 08:13:15 +12:00
Paul Gauthier
fafc9268d4 feat: set grok-3-mini-beta edit_format to whole 2025-04-11 08:12:38 +12:00
Paul Gauthier
65a5d55436 feat: add grok-3-beta and grok-3-mini-beta model settings 2025-04-11 08:10:18 +12:00
Paul Gauthier (aider)
96b350400f feat: make highlight parameter case-insensitive in leaderboard 2025-04-11 07:28:01 +12:00
Paul Gauthier
7983b4caf2 copy 2025-04-11 07:25:10 +12:00
Paul Gauthier (aider)
e44122f1be fix: correct groq to grok typo in model settings yaml 2025-04-11 07:23:16 +12:00
Paul Gauthier (aider)
42618d7ec6 fix: correct groq to grok typos in model names and aliases 2025-04-11 07:23:05 +12:00
Paul Gauthier
1d0167bbf4 feat: add polyglot leaderboard entries for grok3 and optimus alpha 2025-04-11 07:17:56 +12:00
Paul Gauthier (aider)
43d4b21b23 feat: add "optimus" alias for openrouter model 2025-04-11 07:12:27 +12:00
Paul Gauthier (aider)
562171c548 feat: add grok3 alias for xai/groq-3-beta 2025-04-11 07:12:09 +12:00
Paul Gauthier (aider)
8dccecdd9f feat: add xai/groq-3-beta and xai/groq-3-mini-beta models 2025-04-11 07:10:23 +12:00
Paul Gauthier
940ae364d7 feat: add model settings for grok-3-beta, grok-3-mini-beta, optimus-alpha 2025-04-11 07:09:27 +12:00
Paul Gauthier (aider)
532bc454c5 feat: add openrouter/openrouter/optimus-alpha model metadata 2025-04-11 06:54:12 +12:00
Paul Gauthier (aider)
14ffe7782c feat: add openrouter/x-ai/grok-3-mini-beta model metadata 2025-04-11 06:37:33 +12:00
Paul Gauthier (aider)
2dd40fce44 feat: add openrouter/x-ai/grok-3-beta model metadata 2025-04-10 16:18:22 +12:00
Paul Gauthier (aider)
0c8bc46e28 fix: strip trailing } from urls extracted from error messages 2025-04-10 16:06:07 +12:00
Paul Gauthier
7d0dd29937 Merge branch 'main' of github.com:Aider-AI/aider 2025-04-08 18:33:41 +12:00
Paul Gauthier
349cd77821 copy 2025-04-08 08:10:21 +12:00
paul-gauthier
dc2d7b1dfe Merge pull request #3752 from tylersatre/main 2025-04-08 06:31:41 +12:00
Tyler Satre
be30329288 Update azure documentation 2025-04-07 11:04:15 -04:00
Paul Gauthier
71446d9f3c fix: get_file_mentions skips pure basenames with duplicates 2025-04-07 13:22:05 +12:00
Paul Gauthier (aider)
c9d4c8d09b fix: allow adding files by full path with existing basename 2025-04-07 13:19:25 +12:00
Paul Gauthier (aider)
c580ffdb70 test: add test for multiline backtick file mentions 2025-04-07 13:14:27 +12:00
Paul Gauthier
f46deb4eb7 improve diff-fenced prompts 2025-04-07 08:54:01 +12:00
Paul Gauthier
b3215bed48 copy 2025-04-07 08:13:22 +12:00
Paul Gauthier
2a9ab02753 chore: update polyglot leaderboard data 2025-04-07 08:12:37 +12:00
Paul Gauthier (aider)
0da586154d fix: quote values with '#' in sample aider.conf.yml config 2025-04-07 08:09:33 +12:00
Paul Gauthier
26d736551d Merge branch 'main' of github.com:Aider-AI/aider 2025-04-07 08:05:15 +12:00
paul-gauthier
9445a3118b Merge pull request #3733 from banjo/main 2025-04-06 07:28:56 +12:00
paul-gauthier
a2c46c7436 Merge pull request #3735 from KennyDizi/main 2025-04-06 07:20:22 +12:00
paul-gauthier
8df7a0960e Merge pull request #3736 from FelixLisczyk/tl-173 2025-04-06 07:19:30 +12:00
Felix Lisczyk
e7f35e7a35 Add Fireworks AI model 'deepseek-v3-0324' 2025-04-05 16:33:09 +02:00
Kenny Dizi
088e80e38b Add support model openrouter/google/gemini-2.5-pro-preview-03-25 2025-04-05 20:53:06 +07:00
Kenny Dizi
2d65c7f387 Remove trailing spaces 2025-04-05 20:51:11 +07:00
Anton Ödman
94db758eb7 fix: follow conventional commits examples by going all lowercase 2025-04-05 13:07:34 +02:00
Paul Gauthier
2bfb615d68 set version to 0.81.2.dev 2025-04-05 09:01:12 +13:00
Paul Gauthier
87275140f9 version bump to 0.81.1 2025-04-05 09:01:09 +13:00
Paul Gauthier
0672a68ba4 copy 2025-04-05 08:58:10 +13:00
Paul Gauthier (aider)
246e3ccfad refactor: Rename gemini-free alias to gemini-exp in MODEL_ALIASES 2025-04-05 08:56:56 +13:00
Paul Gauthier (aider)
b275ee919f feat: Update gemini model alias and add gemini-free alias 2025-04-05 08:56:35 +13:00
Paul Gauthier (aider)
eda796d5e0 feat: Add metadata and settings for gemini-2.5-pro-preview-03-25 2025-04-05 08:54:45 +13:00
Paul Gauthier
d1b3917309 copy 2025-04-04 22:08:49 +13:00
Paul Gauthier
ffee2b971f blame 2025-04-04 22:08:08 +13:00
Paul Gauthier
b9a80f9c8c set version to 0.81.1.dev 2025-04-04 22:02:37 +13:00
Paul Gauthier
980f673ce2 version bump to 0.81.0 2025-04-04 22:02:34 +13:00
Paul Gauthier
55767a0003 copy 2025-04-04 21:56:25 +13:00
Paul Gauthier
fb44bebe40 copy 2025-04-04 21:53:47 +13:00
Paul Gauthier (aider)
b79f072499 feat: Add alias "quasar" for openrouter/openrouter/quasar-alpha 2025-04-04 21:53:12 +13:00
Paul Gauthier (aider)
d65a2e8b51 fix: Exclude double quotes from detected URLs 2025-04-04 21:48:17 +13:00
Paul Gauthier (aider)
e0b42d51db fix: Do not retry litellm.APIError for insufficient credits. 2025-04-04 21:45:56 +13:00
Paul Gauthier (aider)
c057dc9466 Feat: Add model metadata for openrouter/openrouter/quasar-alpha 2025-04-04 21:43:19 +13:00
Paul Gauthier (aider)
fff53a94d3 fix: Import offer_openrouter_oauth from aider/onboarding.py 2025-04-04 21:41:40 +13:00
Paul Gauthier (aider)
12beedd0a6 style: Run linter to fix line lengths and formatting issues 2025-04-04 21:40:47 +13:00
Paul Gauthier (aider)
80f60a7394 feat: Offer OpenRouter OAuth if model specified but API key is missing 2025-04-04 21:40:41 +13:00
Paul Gauthier
2359348505 copy 2025-04-04 21:39:47 +13:00
Paul Gauthier
63e3e06a8c copy 2025-04-04 18:53:12 +13:00
Paul Gauthier
dca92b580c add openrouter/openrouter/quasar-alpha 2025-04-04 18:52:59 +13:00
Paul Gauthier
24e2960092 add openrouter/openrouter/quasar-alpha 2025-04-04 18:52:16 +13:00
Paul Gauthier (aider)
be1a52c5c1 feat: Read highlight model from query string 2025-04-04 16:11:54 +13:00
Paul Gauthier
8a34a6c8f4 set version to 0.80.5.dev 2025-04-04 15:34:08 +13:00
Paul Gauthier
7924ea9bb9 version bump to 0.80.4 2025-04-04 15:34:04 +13:00
Paul Gauthier
a3a17ae792 copy 2025-04-04 15:31:04 +13:00
Paul Gauthier
f8801d811b feat: Remove max_tokens from deepseek model settings 2025-04-04 15:25:36 +13:00
Paul Gauthier
425284ac62 copy 2025-04-04 15:09:08 +13:00
Paul Gauthier
4872cdf905 copy 2025-04-04 15:08:21 +13:00
Paul Gauthier
88cd81c692 set version to 0.80.4.dev 2025-04-04 08:30:42 +13:00
Paul Gauthier
d45ecd0800 version bump to 0.80.3 2025-04-04 08:30:39 +13:00
Paul Gauthier
4bfcef60f4 copy 2025-04-04 07:58:59 +13:00
Paul Gauthier
e9b7e933f5 copy 2025-04-04 07:54:07 +13:00
Paul Gauthier
e5301cef49 copy 2025-04-04 07:52:16 +13:00
Paul Gauthier
01ca552174 copy 2025-04-04 07:49:36 +13:00
Paul Gauthier
4529d73bf3 feat: Add model metadata for openrouter/google/gemini-2.0-flash-exp:free 2025-04-03 08:43:19 +13:00
Paul Gauthier
0798906a51 Merge branch 'main' into gemini-weak-flash 2025-04-03 08:34:41 +13:00
Paul Gauthier
8547c24dac set version to 0.80.3.dev 2025-04-03 08:33:40 +13:00
Paul Gauthier
0e1e1aae2e version bump to 0.80.2 2025-04-03 08:33:36 +13:00
Paul Gauthier (aider)
9cc31e4087 feat: Configure weak models for Gemini 2.5 Pro 2025-04-03 08:12:27 +13:00
Paul Gauthier
e9c7555bb9 chore: Add TODO comment for Gemini 2.5 Pro models 2025-04-03 08:12:20 +13:00
Paul Gauthier
6f897fec59 copy 2025-04-03 08:10:13 +13:00
Paul Gauthier
8c3d77f4c7 bump deps to pickup https://github.com/BerriAI/litellm/pull/9667 2025-04-03 08:07:30 +13:00
Paul Gauthier
f9b60d83ac copy 2025-04-02 20:15:37 +13:00
Paul Gauthier (aider)
3992681b84 ci: Add Windows workflow to check PyPI version 2025-04-01 21:19:14 +13:00
Paul Gauthier
340bd78259 Revert "ci: Add Windows to check_pypi_version matrix and improve compatibility"
This reverts commit 12a46275a2.
2025-04-01 21:18:31 +13:00
Paul Gauthier (aider)
12a46275a2 ci: Add Windows to check_pypi_version matrix and improve compatibility 2025-04-01 21:17:28 +13:00
Paul Gauthier
b56234f1c9 copy 2025-04-01 21:15:25 +13:00
Paul Gauthier (aider)
60859ec2b9 ci: Fix latest tag detection to exclude dev tags 2025-04-01 21:14:24 +13:00
Paul Gauthier
0a840860f1 docs: Add comment explaining PyPI check workflow purpose 2025-04-01 21:14:17 +13:00
Paul Gauthier (aider)
cebae18dd6 ci: Correct version extraction in check_pypi_version workflow 2025-04-01 21:12:24 +13:00
Paul Gauthier (aider)
9c9c6b6591 ci: Improve robustness of aider version check in CI 2025-04-01 21:10:36 +13:00
Paul Gauthier (aider)
ca0ffc66d1 ci: Run check_pypi_version job across Python 3.9-3.12 2025-04-01 21:08:17 +13:00
Paul Gauthier (aider)
b0623f04fe ci: Add GitHub Action to verify PyPI version matches latest tag 2025-04-01 21:03:20 +13:00
Paul Gauthier
2dec862ea6 copy 2025-04-01 17:08:27 +13:00
Paul Gauthier
f18fe53a9a set version to 0.80.2.dev 2025-04-01 17:06:41 +13:00
Paul Gauthier
73348de2b4 version bump to 0.80.1 2025-04-01 17:06:37 +13:00
Paul Gauthier
f4a418bfcd copy 2025-04-01 17:03:58 +13:00
Paul Gauthier
50588800f5 copy 2025-04-01 16:15:19 +13:00
Paul Gauthier
2762215d66 copy 2025-04-01 16:14:02 +13:00
Paul Gauthier
4e53797aac Merge branch 'main' of github.com:Aider-AI/aider 2025-04-01 16:13:21 +13:00
Paul Gauthier
b24ac4b3a2 pin to avoid yanked versions #3699 2025-04-01 16:13:13 +13:00
paul-gauthier
88ab6afd3e Merge pull request #3698 from aj47/patch-1
Update benchmark README.md to specify how to config other settings
2025-04-01 15:27:38 +13:00
Paul Gauthier
5c5db0a961 noop 2025-04-01 15:27:05 +13:00
AJ (@techfren)
587186d96c Update benchmark README.md to specify how to config other settings 2025-03-31 17:05:53 -07:00
Paul Gauthier
d9ddf93f83 copy 2025-04-01 08:37:08 +13:00
Paul Gauthier
d3882d3513 Merge branch 'main' of github.com:Aider-AI/aider 2025-04-01 08:28:00 +13:00
paul-gauthier
a458215bbb Merge pull request #3692 from claui/requests 2025-04-01 06:53:26 +13:00
Claudia Pellegrino
7ae0fa3775 chore: remove redundant code
1. The module already imports `requests`, so by the time this check is
   called, the module is already loaded.

2. Even if the code path were taken, it would fail anyway, because the
   `aider[oauth]` extra was hallucinated and does not exist.

3. Downstream distributions usually have managed Python environments,
   where pip cannot be used at all.
   That means distros must patch out every such pip invocation
   (example: [1]; full disclosure: I maintain this but other distros
   will eventually bump into the same issues). Restricting at-runtime
   pip usage to the minimum necessary is friendlier to distro
   maintainers.

[1]: https://aur.archlinux.org/cgit/aur.git/tree/archlinux-use-system.patch?h=aider-chat&id=7f8156946857215104bce151454ad0101ade4a48
2025-03-31 19:13:41 +02:00
Paul Gauthier
f1695f8b15 copy 2025-03-31 19:56:59 +13:00
Paul Gauthier
4c08bbb9e5 copy 2025-03-31 19:34:36 +13:00
Paul Gauthier
9b55ff8c4c copy 2025-03-31 19:32:36 +13:00
Paul Gauthier
2096d2b786 copy 2025-03-31 19:27:29 +13:00
Paul Gauthier
70196cd6fd copy 2025-03-31 16:24:13 +13:00
Paul Gauthier
c2cba97722 copy 2025-03-31 14:32:36 +13:00
Paul Gauthier
7534ebd145 blame 2025-03-31 14:28:44 +13:00
Paul Gauthier
6b2331340b set version to 0.80.1.dev 2025-03-31 14:19:36 +13:00
Paul Gauthier
da7b5005fe version bump to 0.80.0 2025-03-31 14:19:32 +13:00
Paul Gauthier
9210e12316 copy 2025-03-31 14:13:54 +13:00
Paul Gauthier (aider)
2c47a79c38 ci: Skip languages.md in codespell check 2025-03-31 14:13:12 +13:00
Paul Gauthier
48cebef974 copy 2025-03-31 14:10:11 +13:00
Paul Gauthier (aider)
52952efd33 test: verify load_dotenv_files override behavior 2025-03-31 11:49:20 +13:00
Paul Gauthier
30dfd28ac4 copy 2025-03-31 11:46:37 +13:00
Paul Gauthier
b5a04f05f3 copy 2025-03-31 11:44:13 +13:00
Paul Gauthier (aider)
d5a34dcbc5 style: Fix trailing whitespace in homepage script 2025-03-31 11:42:47 +13:00
Paul Gauthier (aider)
fc6a05ced6 fix: Improve testimonial parsing for different dash formats 2025-03-31 11:42:40 +13:00
Paul Gauthier (aider)
2d3162a90b style: Remove trailing whitespace 2025-03-31 11:41:51 +13:00
Paul Gauthier (aider)
83c599e741 fix: Improve testimonial parsing from README 2025-03-31 11:41:44 +13:00
Paul Gauthier (aider)
c7f1671d5a feat: Generate testimonials JS in homepage script 2025-03-31 11:39:57 +13:00
Paul Gauthier
9f2d945691 kind words 2025-03-31 11:34:40 +13:00
Paul Gauthier
36ff099145 llama-index-core==0.12.26 2025-03-31 11:32:31 +13:00
Paul Gauthier
120e010e48 llama-index-core==0.12.24.post1 2025-03-31 11:25:07 +13:00
Paul Gauthier
2887816cf0 remove copilot 2025-03-31 11:22:14 +13:00
Paul Gauthier
9848479306 bump deps 2025-03-31 11:20:17 +13:00
Paul Gauthier
b662e6b9eb fix: Handle GitCommandNotFound error 2025-03-31 10:50:01 +13:00
Paul Gauthier
258f1f0848 copy 2025-03-31 10:42:52 +13:00
Paul Gauthier (aider)
a07f312089 style: Apply linter fixes 2025-03-31 10:33:00 +13:00
Paul Gauthier (aider)
605d8fe59a fix: Fix ColorParseError by ensuring hex colors have # prefix 2025-03-31 10:32:48 +13:00
Paul Gauthier (aider)
1c7db4da0d style: Apply linter fixes 2025-03-31 10:27:12 +13:00
Paul Gauthier (aider)
b0acc95b01 fix: Add missing ColorParseError import 2025-03-31 10:27:02 +13:00
Paul Gauthier (aider)
5bcad73515 refactor: Validate color settings once during initialization 2025-03-31 10:26:08 +13:00
Paul Gauthier (aider)
db05754d29 style: Apply linter formatting 2025-03-31 10:17:31 +13:00
Paul Gauthier (aider)
dfe3457906 test: Add test for cmd_test returning output on failure 2025-03-31 10:17:21 +13:00
Paul Gauthier (aider)
7dbb1a2aa8 fix: Return test errors from cmd_run to enable auto-fixing 2025-03-31 10:15:14 +13:00
Paul Gauthier (aider)
83dac4aae2 style: Improve formatting of OpenRouter key exchange error message 2025-03-31 09:17:32 +13:00
Paul Gauthier (aider)
75b79fa002 fix: Correct HTTPError status code access in onboarding 2025-03-31 09:17:26 +13:00
Paul Gauthier (aider)
27c1fd0262 fix: Handle FileNotFoundError in find_common_root 2025-03-31 09:15:26 +13:00
Paul Gauthier (aider)
8069e06f43 feat: Add openrouter deepseek-chat-v3-0324:free model 2025-03-31 09:12:39 +13:00
Paul Gauthier (aider)
8cd106fc8a fix: Prevent UnboundLocalError in get_tracked_files on IndexError 2025-03-31 09:10:29 +13:00
Paul Gauthier (aider)
a9c9877580 feat: Add free DeepSeek chat model configuration to model metadata 2025-03-31 09:08:51 +13:00
Paul Gauthier
19e1201c8a add google-cloud-bigquery as dev dep 2025-03-31 09:04:41 +13:00
Paul Gauthier (aider)
912f98e6eb fix: Remove unused import mock_open 2025-03-31 09:04:26 +13:00
Paul Gauthier (aider)
b6808e3700 test: Remove failing OpenRouter OAuth flow test 2025-03-31 08:53:21 +13:00
Paul Gauthier (aider)
a4f78b60e0 fix: Fix unused import and variable in onboarding tests 2025-03-31 08:51:19 +13:00
Paul Gauthier (aider)
6c9906c639 style: Fix whitespace and formatting in onboarding tests 2025-03-31 08:50:59 +13:00
Paul Gauthier (aider)
8a90af6779 fix: Remove redundant threading patch in onboarding test 2025-03-31 08:50:51 +13:00
Paul Gauthier
9831a13284 test: Simplify OpenRouter OAuth flow test 2025-03-31 08:50:45 +13:00
Paul Gauthier (aider)
d2386bc1f6 test: mock and assert offer_openrouter_oauth call in no-key test 2025-03-31 08:50:32 +13:00
Paul Gauthier
5b10af7b1a test: Disable streaming in main test call 2025-03-31 08:50:28 +13:00
Paul Gauthier (aider)
eacf3cc4ed test: Fix assertion failure and type error in onboarding tests 2025-03-31 08:44:32 +13:00
Paul Gauthier (aider)
87090139f6 test: Refactor onboarding test imports 2025-03-31 08:42:03 +13:00
Paul Gauthier (aider)
650c4cf948 style: Fix flake8 errors in onboarding tests 2025-03-31 08:41:35 +13:00
Paul Gauthier (aider)
24c074eeaa style: Apply linter fixes to onboarding tests 2025-03-31 08:41:11 +13:00
Paul Gauthier (aider)
b54629addb test: Add unit tests for onboarding functions 2025-03-31 08:41:05 +13:00
Paul Gauthier
cd67d11ecf test: Add onboarding tests 2025-03-31 08:40:59 +13:00
Paul Gauthier (aider)
16bb0c93e7 feat: Warn when using --stream and --cache-prompts together 2025-03-31 08:40:17 +13:00
Paul Gauthier
7c40c3a61c copy 2025-03-31 08:38:30 +13:00
Paul Gauthier
b8e8b7496d Merge branch 'main' of github.com:Aider-AI/aider 2025-03-31 08:36:57 +13:00
Paul Gauthier
d29d5e3a47 copy 2025-03-31 08:24:03 +13:00
paul-gauthier
db261d0fa4 Merge pull request #2980 from ivnvxd/fix-completion-menu-styling 2025-03-29 17:17:53 -10:00
Paul Gauthier
b7f6b847d6 Revert "bump deps"
This reverts commit 61147dfecf.
2025-03-30 11:05:38 +13:00
Paul Gauthier
5311a842a5 Revert "limit sentence-transformers<4 since 4.0.1 fails in GitHub Actions ubuntu and windows?"
This reverts commit e288f59da7.
2025-03-30 11:05:24 +13:00
Paul Gauthier
e288f59da7 limit sentence-transformers<4 since 4.0.1 fails in GitHub Actions ubuntu and windows? 2025-03-30 10:56:04 +13:00
Paul Gauthier
c62ceb5db1 test: Fix expected edit format in /model command tests 2025-03-28 20:27:12 -10:00
Paul Gauthier (aider)
35decf122d test: Add test for /model updating default edit format 2025-03-28 20:21:58 -10:00
Paul Gauthier (aider)
2adfe1507b test: Add tests for /model command edit_format behavior 2025-03-28 20:21:39 -10:00
Paul Gauthier (aider)
5516e6b279 feat: Update edit format on /model switch if using default 2025-03-28 20:20:46 -10:00
Paul Gauthier (aider)
8b811c610a refactor: Use get_raw_thinking_tokens in cmd_think_tokens 2025-03-28 20:12:04 -10:00
Paul Gauthier
23348f8e65 refactor: Remove redundant model arg from get_thinking/reasoning calls 2025-03-28 20:10:43 -10:00
Paul Gauthier (aider)
e1c3a2f8cf fix: Use self instead of model argument in get_reasoning_effort 2025-03-28 20:09:38 -10:00
Paul Gauthier (aider)
0b0493fa21 style: Remove unnecessary parentheses in conditions 2025-03-28 20:09:11 -10:00
Paul Gauthier (aider)
14d1742869 fix: Use self instead of model in get_raw_thinking_tokens 2025-03-28 20:09:05 -10:00
Paul Gauthier
96aa77288b refactor: Separate raw thinking token retrieval and fix self access 2025-03-28 20:09:01 -10:00
Paul Gauthier
a4c9c10029 style: Allow horizontal x-axis labels on leaderboard chart 2025-03-28 19:49:23 -10:00
Paul Gauthier (aider)
c9d561e7ad fix: Prevent leaderboard x-axis labels from disappearing 2025-03-28 19:43:26 -10:00
Paul Gauthier
87ba63c14c docs: Add chatgpt-4o-latest benchmark results 2025-03-28 19:36:06 -10:00
Paul Gauthier
0decbad7d0 Merge branch 'main' of github.com:Aider-AI/aider 2025-03-28 19:22:57 -10:00
Paul Gauthier
0e647dbc0e move 2025-03-28 19:21:44 -10:00
Paul Gauthier
2540d28b34 docs: Update Copilot documentation 2025-03-28 19:21:30 -10:00
Paul Gauthier (aider)
19a5e5bb00 docs: Add docs for Github Copilot models 2025-03-28 19:16:48 -10:00
Paul Gauthier
f22afc6458 docs: Add copilot.md 2025-03-28 19:16:44 -10:00
Paul Gauthier
ab00415ca1 Merge branch 'main' into feat/add_copilot 2025-03-28 19:13:54 -10:00
paul-gauthier
8df5406986 Merge pull request #3192 from miradnanali/fix-filename-quoting-for-lint
fix: Use shlex.quote() to enable linting filepaths containing shell metacharacters
2025-03-28 19:07:53 -10:00
Paul Gauthier
7f05159f0f Merge branch 'main' of github.com:Aider-AI/aider 2025-03-28 19:03:40 -10:00
Paul Gauthier
61147dfecf bump deps 2025-03-28 19:02:23 -10:00
paul-gauthier
06da133aac Merge pull request #3633 from susliko/scala-repomap
feat: add repomap support for Scala
2025-03-28 19:00:50 -10:00
Paul Gauthier
ff1d047048 docs: Add documentation for Ctrl-X Ctrl-E editor binding 2025-03-28 19:00:43 -10:00
paul-gauthier
4a8b17cb84 Merge pull request #3659 from iamFIREcracker/cx-ce-editor-key-binding
Invoke the editor by pressing `C-x C-e`
2025-03-28 18:58:13 -10:00
Paul Gauthier
fbafc09e6a copy 2025-03-28 18:54:20 -10:00
Paul Gauthier
c3c960383e feat: Offer OpenRouter OAuth if no model detected 2025-03-28 18:51:35 -10:00
Paul Gauthier
9e3adf0bf8 fix: Temporarily disable OpenRouter OAuth onboarding flow 2025-03-28 18:46:39 -10:00
Paul Gauthier
2bc0aa1777 docs: Fix docstring for check_openrouter_tier failure case 2025-03-28 18:45:31 -10:00
Paul Gauthier (aider)
3bc4064b61 fix: Default to free tier if OpenRouter tier check fails 2025-03-28 18:44:57 -10:00
Paul Gauthier (aider)
b4f9258f3c fix: Remove unused exception variable in webbrowser.open call 2025-03-28 18:29:26 -10:00
Paul Gauthier (aider)
ad844cce5c style: Fix linting issues in onboarding.py 2025-03-28 18:29:14 -10:00
Paul Gauthier (aider)
c73b064133 feat: Add OpenRouter tier-based model selection logic 2025-03-28 18:29:08 -10:00
Paul Gauthier
bd9b63a1aa refactor: Simplify OpenRouter OAuth flow messages and error handling 2025-03-28 18:29:05 -10:00
Paul Gauthier (aider)
2d87431aeb style: Apply linter formatting to onboarding.py 2025-03-28 18:03:10 -10:00
Paul Gauthier (aider)
3f3b1fb657 refactor: Update OpenRouter OAuth flow timeout to 5 minutes 2025-03-28 18:03:05 -10:00
Paul Gauthier
477f9eb4ec refactor: Update OpenRouter onboarding messages and flow 2025-03-28 18:03:04 -10:00
Paul Gauthier (aider)
91497dc2ee feat: Append OpenRouter API key to oauth-keys.env instead of overwriting 2025-03-28 17:57:26 -10:00
Paul Gauthier
928b78d9f6 feat: Simplify default model selection and improve OpenRouter OAuth key saving 2025-03-28 17:57:24 -10:00
Paul Gauthier (aider)
51825663b9 refactor: Extract model selection and OAuth logic into separate functions 2025-03-28 17:53:15 -10:00
Paul Gauthier
01fdbda728 refactor: Restructure model selection logic with new helper functions 2025-03-28 17:53:13 -10:00
Paul Gauthier
fa3c68fccd fix: Use print for auth URL and refine missing key message 2025-03-28 17:46:12 -10:00
Paul Gauthier (aider)
189977e4c7 fix: Update OpenRouter OAuth callback URL path to /callback/aider 2025-03-28 17:35:26 -10:00
Paul Gauthier (aider)
290fd99b6d feat: Load OAuth keys from ~/.aider/oauth-keys.env 2025-03-28 17:33:30 -10:00
Paul Gauthier (aider)
15cec5bd50 feat: Save OpenRouter API key to ~/.aider/oauth-keys.env 2025-03-28 17:32:39 -10:00
Paul Gauthier (aider)
f53db636e1 style: Format comments 2025-03-28 17:28:00 -10:00
Paul Gauthier (aider)
47d3802ffe feat: Handle user interruption during OpenRouter OAuth flow 2025-03-28 17:27:54 -10:00
Paul Gauthier (aider)
e98ffb5ae0 fix: Fix OAuth server premature shutdown on callback 2025-03-28 17:25:02 -10:00
Paul Gauthier (aider)
5d77eb1314 feat: Redirect callback URL to website if code param is missing 2025-03-28 17:23:21 -10:00
Paul Gauthier
f124cdbb6f refactor: Remove stream argument from Coder in onboarding 2025-03-28 17:21:04 -10:00
Paul Gauthier (aider)
1649d084d2 fix: Remove unused sys import 2025-03-28 17:16:28 -10:00
Paul Gauthier (aider)
36ca790c3d style: Sort imports alphabetically 2025-03-28 17:16:09 -10:00
Paul Gauthier (aider)
a91a8216b7 test: Add main function to test OpenRouter OAuth flow 2025-03-28 17:16:03 -10:00
Paul Gauthier (aider)
8cae7b20e7 fix: Remove unused variable found_key_env_var 2025-03-28 17:14:40 -10:00
Paul Gauthier (aider)
a537119f3d fix: Address flake8 linting errors in onboarding 2025-03-28 17:14:15 -10:00
Paul Gauthier (aider)
15fe0afe62 style: Run linter on onboarding module 2025-03-28 17:13:48 -10:00
Paul Gauthier (aider)
1b2a4db1ed feat: Add OpenRouter OAuth PKCE flow for authentication 2025-03-28 17:13:42 -10:00
Paul Gauthier (aider)
88a02723fa refactor: Extract default model selection logic to onboarding module 2025-03-28 16:54:10 -10:00
Paul Gauthier
7d013f35e2 rename 2025-03-28 16:51:58 -10:00
Paul Gauthier (aider)
e881d33bea docs: Update README cog import for homepage.py 2025-03-28 16:51:46 -10:00
Paul Gauthier (aider)
38da91becd refactor: Rename badges.py to homepage.py and update imports 2025-03-28 16:51:05 -10:00
Paul Gauthier
c99d96a700 copy 2025-03-28 16:46:08 -10:00
Paul Gauthier (aider)
fa89a6950b test: Update default Gemini model assertion 2025-03-28 16:43:39 -10:00
Paul Gauthier
cde08da282 refactor: Simplify model selection warning and analytics event logging 2025-03-28 16:42:12 -10:00
Paul Gauthier
8619bd4e84 refactor: Update auto_model_selection analytics event properties 2025-03-28 16:35:30 -10:00
Paul Gauthier (aider)
f49449b520 style: Remove trailing whitespace 2025-03-28 16:32:54 -10:00
Paul Gauthier (aider)
2fe79ac6a3 feat: Add analytics for auto-commits and auto-model selection 2025-03-28 16:32:47 -10:00
Paul Gauthier
d8830c43c5 fix: Clarify cost estimate warning for streaming/caching 2025-03-28 16:32:44 -10:00
Paul Gauthier (aider)
4ac945da70 feat: boost repomap ranking for files with mentioned path components 2025-03-28 16:23:35 -10:00
Paul Gauthier (aider)
ee0019e25f fix: Correct typo in streaming warning message 2025-03-28 16:21:05 -10:00
Paul Gauthier (aider)
f37b814570 feat: Improve streaming cost warning display 2025-03-28 16:20:23 -10:00
Paul Gauthier
e559bc8694 docs: Update model usage statistics in FAQ 2025-03-28 16:10:19 -10:00
Paul Gauthier (aider)
f7618440e7 chore: Remove unused VERTEX_AI_API_KEY entry 2025-03-28 16:10:08 -10:00
Paul Gauthier (aider)
7bc62cb674 feat: add VERTEXAI_PROJECT support for Gemini 2.5 Pro model 2025-03-28 16:09:59 -10:00
Paul Gauthier (aider)
db77e2e9b9 feat: prioritize gemini-2.5-pro when GEMINI or VERTEX keys present 2025-03-28 16:07:54 -10:00
Paul Gauthier (aider)
083b49f3c4 style: Improve testimonial card spacing and readability 2025-03-28 16:04:53 -10:00
Paul Gauthier (aider)
f3f0416d31 feat: maintain consistent height for testimonial cards 2025-03-28 16:02:54 -10:00
Paul Gauthier (aider)
775a9f86a1 style: Add CSS transitions for testimonial cards 2025-03-28 16:01:13 -10:00
Paul Gauthier (aider)
e2bfdc444a feat: replace flip animation with elegant fade transition for testimonials 2025-03-28 16:00:43 -10:00
Paul Gauthier
3c7783585e feat: Speed up testimonials rotation and add script tags 2025-03-28 15:57:53 -10:00
Paul Gauthier (aider)
761a297903 feat: include script tags in testimonials JS output 2025-03-28 15:53:53 -10:00
Paul Gauthier
6d30094a93 chore: Remove commented code from index.html 2025-03-28 15:53:47 -10:00
Paul Gauthier (aider)
d8e1816774 fix: Properly indent cog directive in index.html 2025-03-28 15:51:18 -10:00
Paul Gauthier
ae371cb362 feat: Add new testimonials to website 2025-03-28 15:51:11 -10:00
Paul Gauthier (aider)
73c46e8e24 fix: Remove unnecessary f-string in testimonials formatting 2025-03-28 15:49:14 -10:00
Paul Gauthier (aider)
ef4c40c692 style: Format code with linter 2025-03-28 15:48:48 -10:00
Paul Gauthier (aider)
04b3ada7f7 feat: dynamically load testimonials from README using cog 2025-03-28 15:48:39 -10:00
Paul Gauthier
424b43b3d3 copy 2025-03-28 15:45:04 -10:00
Paul Gauthier (aider)
9a9255d6f9 docs: add more user testimonials to README 2025-03-28 15:42:48 -10:00
Paul Gauthier
d9e52e41ff fix: Replace self.print_error with print for timeout message 2025-03-28 15:36:25 -10:00
Paul Gauthier (aider)
a038bc002a feat: Include URL in page timeout warning message 2025-03-28 15:35:01 -10:00
Paul Gauthier (aider)
fa256eb1a7 feat: Change timeout error to warning and continue scraping 2025-03-28 15:34:18 -10:00
Paul Gauthier (aider)
6689f001cf docs: Add positive user quotes to kind words section 2025-03-28 15:33:47 -10:00
Paul Gauthier
cc043bab9c Merge branch 'main' of github.com:Aider-AI/aider 2025-03-28 15:30:15 -10:00
Vasil Markoukin
5af73b1dcf feat: add repomap support for Scala
Resolves #3578
2025-03-28 10:07:57 +03:00
Paul Gauthier
85925a2dc6 copy 2025-03-27 18:40:09 -10:00
paul-gauthier
fb23b6c26f Merge pull request #3662 from schpet/markdown-headings-left 2025-03-27 11:58:31 -10:00
Peter Schilling (aider)
d5cec5f71e aider: chore: Remove unnecessary comment in mdstream.py 2025-03-27 14:08:04 -07:00
Peter Schilling (aider)
13b62e3d06 aider: fix: Use correct token type for markdown heading alignment 2025-03-27 13:59:58 -07:00
Peter Schilling (aider)
779f07f072 aider: fix: Align headings left while preserving h1/h2 styling 2025-03-27 13:58:05 -07:00
Peter Schilling (aider)
b923d63700 aider: style: Left-align markdown headings 2025-03-27 13:48:12 -07:00
Paul Gauthier
7e2dd9bc04 copy 2025-03-27 09:47:30 -10:00
Paul Gauthier
ef1f869b73 set version to 0.79.3.dev 2025-03-27 09:35:00 -10:00
Paul Gauthier
959d6334db version bump to 0.79.2 2025-03-27 09:34:57 -10:00
Paul Gauthier
d7b00b93c7 copy 2025-03-27 09:28:01 -10:00
Paul Gauthier
eec084c842 copy 2025-03-27 09:27:05 -10:00
Paul Gauthier
87b504a58f copy 2025-03-27 09:03:40 -10:00
Paul Gauthier (aider)
243d4d0727 feat: add openrouter/deepseek-chat-v3-0324 model config 2025-03-27 09:02:55 -10:00
Paul Gauthier (aider)
673acf4308 feat: enable retries for OpenRouter choices errors 2025-03-27 07:01:10 -10:00
Paul Gauthier (aider)
fd180ebff5 style: Format test_exceptions.py with linter 2025-03-27 06:58:46 -10:00
Paul Gauthier (aider)
61705ce7fc test: add coverage for OpenRouter API error detection 2025-03-27 06:58:38 -10:00
Paul Gauthier (aider)
6e1dd4474b feat: add OpenRouter API error detection 2025-03-27 06:56:28 -10:00
Matteo Landi (aider)
7924657584 feat: Improve C-x C-e editor keybinding to handle cursor and newline 2025-03-27 10:35:03 +01:00
Matteo Landi (aider)
4f5ed8ace0 feat: Add C-x C-e keybinding to edit input in external editor 2025-03-27 08:33:50 +01:00
Paul Gauthier (aider)
8737220fb6 feat: update Gemini 2.5 Pro max output tokens to 64k 2025-03-26 08:42:28 -10:00
Paul Gauthier
bcb01e8c1b Merge branch 'main' of github.com:Aider-AI/aider 2025-03-26 08:37:58 -10:00
Paul Gauthier
41f669bb89 feat: Add 'gemini' alias for gemini-2.5-pro model 2025-03-26 08:37:51 -10:00
Paul Gauthier
983bc199b3 cleanup 2025-03-26 08:37:40 -10:00
paul-gauthier
8f15269bd0 Merge pull request #3647 from iamFIREcracker/lisp-comments
Add support for Lisp-style comments in file watcher
2025-03-26 08:36:56 -10:00
Paul Gauthier (aider)
6ffe3e7067 style: Format code with linter 2025-03-26 07:11:22 -10:00
Paul Gauthier (aider)
51bf6035f7 feat: add footer safe zone for GitHub URL in confetti image 2025-03-26 07:11:15 -10:00
Paul Gauthier (aider)
249a6fc9b1 chore: remove yellow hexagon confettis below stars line 2025-03-26 07:10:36 -10:00
Paul Gauthier (aider)
c6d4337855 style: Fix whitespace in 30k-image.py 2025-03-26 07:09:28 -10:00
Paul Gauthier (aider)
9fa3636c57 feat: add safe zone for text in confetti generation 2025-03-26 07:09:20 -10:00
Paul Gauthier
a417e6e644 style: Fix trailing whitespace in SVG generation script 2025-03-26 07:07:16 -10:00
Paul Gauthier (aider)
999eb86d7a fix: Add missing math import in 30k-image.py 2025-03-26 07:01:51 -10:00
Paul Gauthier (aider)
7b97f93051 style: Reformat code with linter 2025-03-26 07:01:19 -10:00
Paul Gauthier (aider)
02bc926d75 feat: add script to generate 30k GitHub stars celebration SVG 2025-03-26 07:01:11 -10:00
Paul Gauthier
48ee3cdf98 feat: Add script to generate 30k images 2025-03-26 07:01:05 -10:00
Paul Gauthier (aider)
2556a912d3 style: Format SVG elements with consistent line breaks 2025-03-26 06:59:45 -10:00
Paul Gauthier (aider)
487674b1c5 feat: enhance celebration image with GitHub branding and animations 2025-03-26 06:59:37 -10:00
Paul Gauthier (aider)
347fbf6471 style: Format code with linter 2025-03-26 06:55:12 -10:00
Paul Gauthier (aider)
3eff70a3bc feat: enhance celebration image with decorations and glow effects 2025-03-26 06:55:04 -10:00
Paul Gauthier (aider)
ad7c708039 style: Format code with linter 2025-03-26 06:51:43 -10:00
Paul Gauthier (aider)
f993c1f22c feat: Improve celebration image with glow effect and tighter spacing 2025-03-26 06:51:36 -10:00
Paul Gauthier (aider)
75b714a1ad fix: Resolve flake8 errors in generate_celebration_image.py 2025-03-26 06:47:40 -10:00
Paul Gauthier (aider)
0636d40909 style: Fix linting issues in generate_celebration_image.py 2025-03-26 06:47:20 -10:00
Paul Gauthier (aider)
cbb3660a17 feat: Convert celebration image generator to SVG format 2025-03-26 06:47:13 -10:00
Paul Gauthier (aider)
42363beb72 style: Format code with linter 2025-03-26 06:45:10 -10:00
Paul Gauthier (aider)
efa36a7196 feat: add script to generate 30k stars celebration image 2025-03-26 06:45:03 -10:00
Matteo Landi (aider)
fab713a6a8 fix: Handle Lisp semicolon comments in watch.py 2025-03-26 13:01:53 +01:00
Matteo Landi (aider)
7d5f1143af test: Add Lisp-style comment tests to file watcher 2025-03-26 08:17:29 +01:00
Matteo Landi (aider)
f05f8df44c refactor: Add support for Lisp-style comments in file watcher 2025-03-26 08:13:09 +01:00
Son H. Nguyen
a1286d0d4d doc: add example command for github copilot models 2025-03-08 14:18:48 +07:00
Son H. Nguyen
eef3a3afeb feat: add copilot models to model list 2025-03-08 14:07:15 +07:00
Mir Adnan ALI
3e8f9aa31c fix: Use shlex.quote() to lint filepaths containing shell metacharacters 2025-02-08 17:31:01 -05:00
Andrey Ivanov
afebfe5f4f fix[io.py]: completion menu current item color styling 2025-01-23 23:09:17 +00:00
71 changed files with 4730 additions and 1417 deletions

View File

@@ -0,0 +1,86 @@
name: Check PyPI Version
# Check to be sure `pip install aider-chat` installs the most recently published version.
# If dependencies get yanked, it may render the latest version uninstallable.
# See https://github.com/Aider-AI/aider/issues/3699 for example.
on:
schedule:
# Run once a day at midnight UTC
- cron: '0 0 * * *'
workflow_dispatch: # Allows manual triggering
jobs:
check_version:
runs-on: ubuntu-latest
strategy:
matrix:
# Run the same check on every supported Python minor version.
python-version: ["3.9", "3.10", "3.11", "3.12"]
steps:
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
# Note: the install happens BEFORE checkout, so the published PyPI
# wheel is what gets installed, not the repository working copy.
- name: Install aider-chat
run: pip install aider-chat
- name: Get installed aider version
id: installed_version
run: |
set -x # Enable debugging output
aider_version_output=$(aider --version)
# NOTE(review): GitHub's default run shell is `bash -e`; if the command
# substitution above fails, the script likely aborts before this check
# ever runs — confirm whether this explicit failure message is reachable.
if [ $? -ne 0 ]; then
echo "Error: 'aider --version' command failed."
exit 1
fi
echo "Raw aider --version output: $aider_version_output"
# Extract version number (format X.Y.Z)
version_num=$(echo "$aider_version_output" | grep -oP '\d+\.\d+\.\d+')
# Check if grep found anything
if [ -z "$version_num" ]; then
echo "Error: Could not extract version number using grep -oP '\d+\.\d+\.\d+' from output: $aider_version_output"
exit 1
fi
echo "Extracted version number: $version_num"
# Publish the extracted version for the later "Compare versions" step.
echo "version=$version_num" >> $GITHUB_OUTPUT
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch all history for all tags
- name: Get latest tag
id: latest_tag
run: |
set -x # Enable debugging output
# Fetch all tags from remote just in case
git fetch --tags origin main
# Get the latest tag that strictly matches vX.Y.Z (no suffixes like .dev)
# List all tags, sort by version descending, filter for exact pattern, take the first one
latest_tag=$(git tag --sort=-v:refname | grep -P '^v\d+\.\d+\.\d+$' | head -n 1)
if [ -z "$latest_tag" ]; then
echo "Error: Could not find any tags matching the pattern '^v\d+\.\d+\.\d+$'"
exit 1
fi
echo "Latest non-dev tag: $latest_tag"
# Remove 'v' prefix for comparison
tag_num=${latest_tag#v}
echo "Extracted tag number: $tag_num"
# Publish the tag-derived version for the later "Compare versions" step.
echo "tag=$tag_num" >> $GITHUB_OUTPUT
- name: Compare versions
run: |
echo "Installed version: ${{ steps.installed_version.outputs.version }}"
echo "Latest tag version: ${{ steps.latest_tag.outputs.tag }}"
# Fail the job when the freshly pip-installed version lags the newest release tag.
if [ "${{ steps.installed_version.outputs.version }}" != "${{ steps.latest_tag.outputs.tag }}" ]; then
echo "Error: Installed aider version (${{ steps.installed_version.outputs.version }}) does not match the latest tag (${{ steps.latest_tag.outputs.tag }})."
exit 1
fi
echo "Versions match."

View File

@@ -0,0 +1,90 @@
name: Windows Check PyPI Version
# Check to be sure `pip install aider-chat` installs the most recently published version on Windows.
# If dependencies get yanked, it may render the latest version uninstallable.
# See https://github.com/Aider-AI/aider/issues/3699 for example.
on:
schedule:
# Run once a day at 1 AM UTC (offset from Ubuntu check)
- cron: '0 1 * * *'
workflow_dispatch: # Allows manual triggering
jobs:
check_version:
runs-on: windows-latest
strategy:
matrix:
# Run the same check on every supported Python minor version.
python-version: ["3.9", "3.10", "3.11", "3.12"]
defaults:
run:
shell: pwsh # Use PowerShell for all run steps
steps:
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
# Note: the install happens BEFORE checkout, so the published PyPI
# wheel is what gets installed, not the repository working copy.
- name: Install aider-chat
run: pip install aider-chat
- name: Get installed aider version
id: installed_version
run: |
Write-Host "Running 'aider --version'..."
$aider_version_output = aider --version
# $LASTEXITCODE reflects the exit status of the external `aider` command.
if ($LASTEXITCODE -ne 0) {
Write-Error "Error: 'aider --version' command failed."
exit 1
}
Write-Host "Raw aider --version output: $aider_version_output"
# Extract version number (format X.Y.Z) using PowerShell regex
$match = [regex]::Match($aider_version_output, '\d+\.\d+\.\d+')
if (-not $match.Success) {
Write-Error "Error: Could not extract version number using regex '\d+\.\d+\.\d+' from output: $aider_version_output"
exit 1
}
$version_num = $match.Value
Write-Host "Extracted version number: $version_num"
# Publish the extracted version for the later "Compare versions" step.
echo "version=$version_num" >> $env:GITHUB_OUTPUT
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch all history for all tags
- name: Get latest tag
id: latest_tag
run: |
Write-Host "Fetching tags..."
# Fetch all tags from remote just in case
git fetch --tags origin main
Write-Host "Getting latest non-dev tag..."
# Get the latest tag that strictly matches vX.Y.Z (no suffixes like .dev)
# List all tags, sort by version descending, filter for exact pattern, take the first one
$latest_tag = (git tag --sort=-v:refname | Select-String -Pattern '^v\d+\.\d+\.\d+$' | Select-Object -First 1).Line
if (-not $latest_tag) {
Write-Error "Error: Could not find any tags matching the pattern '^v\d+\.\d+\.\d+$'"
exit 1
}
Write-Host "Latest non-dev tag: $latest_tag"
# Remove 'v' prefix for comparison
$tag_num = $latest_tag.Substring(1)
Write-Host "Extracted tag number: $tag_num"
# Publish the tag-derived version for the later "Compare versions" step.
echo "tag=$tag_num" >> $env:GITHUB_OUTPUT
- name: Compare versions
run: |
Write-Host "Installed version: ${{ steps.installed_version.outputs.version }}"
Write-Host "Latest tag version: ${{ steps.latest_tag.outputs.tag }}"
# Fail the job when the freshly pip-installed version lags the newest release tag.
if ("${{ steps.installed_version.outputs.version }}" -ne "${{ steps.latest_tag.outputs.tag }}") {
Write-Error "Error: Installed aider version (${{ steps.installed_version.outputs.version }}) does not match the latest tag (${{ steps.latest_tag.outputs.tag }})."
exit 1
}
Write-Host "Versions match."

View File

@@ -18,5 +18,6 @@ repos:
rev: v2.2.6
hooks:
- id: codespell
args: ["--skip", "aider/website/docs/languages.md"]
additional_dependencies:
- tomli

View File

@@ -1,5 +1,94 @@
# Release history
### main branch
- Commit messages generated by aider are no longer forced to be entirely lowercase, by Peter Hadlaw.
- Updated default settings for Grok models.
- Aider wrote 64% of the code in this release.
### Aider v0.81.2
- Add support for `xai/grok-3-beta`, `xai/grok-3-mini-beta`, `openrouter/x-ai/grok-3-beta`, `openrouter/x-ai/grok-3-mini-beta`, and `openrouter/openrouter/optimus-alpha` models.
- Add alias "grok3" for `xai/grok-3-beta`.
- Add alias "optimus" for `openrouter/openrouter/optimus-alpha`.
- Fix URL extraction from error messages.
- Allow adding files by full path even if a file with the same basename is already in the chat.
- Fix quoting of values containing '#' in the sample `aider.conf.yml`.
- Add support for Fireworks AI model 'deepseek-v3-0324', by Felix Lisczyk.
- Commit messages generated by aider are now lowercase, by Anton Ödman.
- Aider wrote 64% of the code in this release.
### Aider v0.81.1
- Added support for the `gemini/gemini-2.5-pro-preview-03-25` model.
- Updated the `gemini` alias to point to `gemini/gemini-2.5-pro-preview-03-25`.
- Added the `gemini-exp` alias for `gemini/gemini-2.5-pro-exp-03-25`.
- Aider wrote 87% of the code in this release.
### Aider v0.81.0
- Added support for the `openrouter/openrouter/quasar-alpha` model.
- Run with `aider --model quasar`
- Offer OpenRouter OAuth authentication if an OpenRouter model is specified but the API key is missing.
- Prevent retrying API calls when the provider reports insufficient credits.
- Improve URL detection to exclude trailing double quotes.
- Aider wrote 86% of the code in this release.
### Aider v0.80.4
- Bumped deps to pickup litellm change to properly display the root cause of OpenRouter "choices" errors.
### Aider v0.80.3
- Improve error message for OpenRouter API connection issues to mention potential rate limiting or upstream provider issues.
- Configure weak models (`gemini/gemini-2.0-flash` and `openrouter/google/gemini-2.0-flash-exp:free`) for Gemini 2.5 Pro models.
- Add model metadata for `openrouter/google/gemini-2.0-flash-exp:free`.
### Aider v0.80.2
- Bumped deps.
### Aider v0.80.1
- Updated deps for yanked fsspec and aiohttp packages #3699
- Removed redundant dependency check during OpenRouter OAuth flow, by Claudia Pellegrino.
### Aider v0.80.0
- OpenRouter OAuth integration:
- Offer to OAuth against OpenRouter if no model and keys are provided.
- Select OpenRouter default model based on free/paid tier status if `OPENROUTER_API_KEY` is set and no model is specified.
- Prioritize `gemini/gemini-2.5-pro-exp-03-25` if `GEMINI_API_KEY` is set, and `vertex_ai/gemini-2.5-pro-exp-03-25` if `VERTEXAI_PROJECT` is set, when no model is specified.
- Validate user-configured color settings on startup and warn/disable invalid ones.
- Warn at startup if `--stream` and `--cache-prompts` are used together, as cost estimates may be inaccurate.
- Boost repomap ranking for files whose path components match identifiers mentioned in the chat.
- Change web scraping timeout from an error to a warning, allowing scraping to continue with potentially incomplete content.
- Left-align markdown headings in the terminal output, by Peter Schilling.
- Update edit format to the new model's default when switching models with `/model`, if the user was using the old model's default format.
- Add `Ctrl-X Ctrl-E` keybinding to edit the current input buffer in an external editor, by Matteo Landi.
- Fix linting errors for filepaths containing shell metacharacters, by Mir Adnan ALI.
- Add the `openrouter/deepseek-chat-v3-0324:free` model.
- Add repomap support for the Scala language, by Vasil Markoukin.
- Fixed bug in `/run` that was preventing auto-testing.
- Fix bug preventing `UnboundLocalError` during git tree traversal.
- Handle `GitCommandNotFound` error if git is not installed or not in PATH.
- Handle `FileNotFoundError` if the current working directory is deleted while aider is running.
- Fix completion menu current item color styling, by Andrey Ivanov.
- Aider wrote 87% of the code in this release.
### Aider v0.79.2
- Added 'gemini' alias for gemini-2.5-pro model.
- Updated Gemini 2.5 Pro max output tokens to 64k.
- Added support for Lisp-style semicolon comments in file watcher, by Matteo Landi.
- Added OpenRouter API error detection and retries.
- Added openrouter/deepseek-chat-v3-0324 model.
- Aider wrote 93% of the code in this release.
### Aider v0.79.1
- Improved model listing to include all models in fuzzy matching, including those provided by aider (not litellm).
### Aider v0.79.0
- Added support for Gemini 2.5 Pro models.

View File

@@ -20,20 +20,20 @@ Aider lets you pair program with LLMs to start a new project or build on your ex
<p align="center">
<!--[[[cog
from scripts.badges import get_badges_md
from scripts.homepage import get_badges_md
text = get_badges_md()
cog.out(text)
]]]-->
<a href="https://github.com/Aider-AI/aider/stargazers"><img alt="GitHub Stars" title="Total number of GitHub stars the Aider project has received"
src="https://img.shields.io/github/stars/Aider-AI/aider?style=flat-square&logo=github&color=f1c40f&labelColor=555555"/></a>
<a href="https://pypi.org/project/aider-chat/"><img alt="PyPI Downloads" title="Total number of installations via pip from PyPI"
src="https://img.shields.io/badge/📦%20Installs-1.7M-2ecc71?style=flat-square&labelColor=555555"/></a>
src="https://img.shields.io/badge/📦%20Installs-1.9M-2ecc71?style=flat-square&labelColor=555555"/></a>
<img alt="Tokens per week" title="Number of tokens processed weekly by Aider users"
src="https://img.shields.io/badge/📈%20Tokens%2Fweek-15B-3498db?style=flat-square&labelColor=555555"/>
<a href="https://openrouter.ai/"><img alt="OpenRouter Ranking" title="Aider's ranking among applications on the OpenRouter platform"
<a href="https://openrouter.ai/#options-menu"><img alt="OpenRouter Ranking" title="Aider's ranking among applications on the OpenRouter platform"
src="https://img.shields.io/badge/🏆%20OpenRouter-Top%2020-9b59b6?style=flat-square&labelColor=555555"/></a>
<a href="https://aider.chat/HISTORY.html"><img alt="Singularity" title="Percentage of the new code in Aider's last release written by Aider itself"
src="https://img.shields.io/badge/🔄%20Singularity-65%25-e74c3c?style=flat-square&labelColor=555555"/></a>
src="https://img.shields.io/badge/🔄%20Singularity-86%25-e74c3c?style=flat-square&labelColor=555555"/></a>
<!--[[[end]]]-->
</p>
@@ -160,3 +160,13 @@ See the [installation instructions](https://aider.chat/docs/install.html) and [u
- *"Hands down, this is the best AI coding assistant tool so far."* — [IndyDevDan](https://www.youtube.com/watch?v=MPYFPvxfGZs)
- *"[Aider] changed my daily coding workflows. It's mind-blowing how a single Python application can change your life."* — [maledorak](https://discord.com/channels/1131200896827654144/1131200896827654149/1258453375620747264)
- *"Best agent for actual dev work in existing codebases."* — [Nick Dobos](https://twitter.com/NickADobos/status/1690408967963652097?s=20)
- *"One of my favorite pieces of software. Blazing trails on new paradigms!"* — [Chris Wall](https://x.com/chris65536/status/1905053299251798432)
- *"Aider has been revolutionary for me and my work."* — [Starry Hope](https://x.com/starryhopeblog/status/1904985812137132056)
- *"Try aider! One of the best ways to vibe code."* — [Chris Wall](https://x.com/Chris65536/status/1905053418961391929)
- *"Aider is hands down the best. And it's free and opensource."* — [AriyaSavakaLurker](https://www.reddit.com/r/ChatGPTCoding/comments/1ik16y6/whats_your_take_on_aider/mbip39n/)
- *"Aider is also my best friend."* — [jzn21](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27dcnb/)
- *"Try Aider, it's worth it."* — [jorgejhms](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27cp99/)
- *"I like aider :)"* — [Chenwei Cui](https://x.com/ccui42/status/1904965344999145698)
- *"Aider is the precision tool of LLM code gen. It is minimal, thoughtful and capable of surgical changes to your codebase all while keeping the developer in control."* — [Reilly Sweetland](https://x.com/rsweetland/status/1904963807237259586)
- *"Cannot believe aider vibe coded a 650 LOC feature across service and cli today in 1 shot."* - [autopoietist](https://discord.com/channels/1131200896827654144/1131200896827654149/1355675042259796101)

View File

@@ -1,6 +1,6 @@
from packaging import version
__version__ = "0.79.2.dev"
__version__ = "0.81.4.dev"
safe_version = __version__
try:

View File

@@ -143,7 +143,10 @@ class YamlHelpFormatter(argparse.HelpFormatter):
default = "true"
if default:
parts.append(f"#{switch}: {default}\n")
if "#" in default:
parts.append(f'#{switch}: "{default}"\n')
else:
parts.append(f"#{switch}: {default}\n")
elif action.nargs in ("*", "+") or isinstance(action, argparse._AppendAction):
parts.append(f"#{switch}: xxx")
parts.append("## Specify multiple values like this:")

View File

@@ -209,12 +209,12 @@ class Coder:
output = f"{prefix}: {main_model.name} with {self.edit_format} edit format"
# Check for thinking token budget
thinking_tokens = main_model.get_thinking_tokens(main_model)
thinking_tokens = main_model.get_thinking_tokens()
if thinking_tokens:
output += f", {thinking_tokens} think tokens"
# Check for reasoning effort
reasoning_effort = main_model.get_reasoning_effort(main_model)
reasoning_effort = main_model.get_reasoning_effort()
if reasoning_effort:
output += f", reasoning {reasoning_effort}"
@@ -922,10 +922,11 @@ class Coder:
else:
self.io.tool_error(text)
url_pattern = re.compile(r"(https?://[^\s/$.?#].[^\s]*)")
# Exclude double quotes from the matched URL characters
url_pattern = re.compile(r'(https?://[^\s/$.?#].[^\s"]*)')
urls = list(set(url_pattern.findall(text))) # Use set to remove duplicates
for url in urls:
url = url.rstrip(".',\"")
url = url.rstrip(".',\"}") # Added } to the characters to strip
self.io.offer_url(url)
return urls
@@ -934,7 +935,8 @@ class Coder:
if not self.detect_urls:
return inp
url_pattern = re.compile(r"(https?://[^\s/$.?#].[^\s]*[^\s,.])")
# Exclude double quotes from the matched URL characters
url_pattern = re.compile(r'(https?://[^\s/$.?#].[^\s"]*[^\s,.])')
urls = list(set(url_pattern.findall(inp))) # Use set to remove duplicates
group = ConfirmGroup(urls)
for url in urls:
@@ -1624,10 +1626,6 @@ class Coder:
mentioned_rel_fnames = set()
fname_to_rel_fnames = {}
for rel_fname in addable_rel_fnames:
# Skip files that share a basename with files already in chat
if os.path.basename(rel_fname) in existing_basenames:
continue
normalized_rel_fname = rel_fname.replace("\\", "/")
normalized_words = set(word.replace("\\", "/") for word in words)
if normalized_rel_fname in normalized_words:
@@ -1642,6 +1640,10 @@ class Coder:
fname_to_rel_fnames[fname].append(rel_fname)
for fname, rel_fnames in fname_to_rel_fnames.items():
# If the basename is already in chat, don't add based on a basename mention
if fname in existing_basenames:
continue
# If the basename mention is unique among addable files and present in the text
if len(rel_fnames) == 1 and fname in words:
mentioned_rel_fnames.add(rel_fnames[0])
@@ -1961,11 +1963,6 @@ class Coder:
f" ${format_cost(self.total_cost)} session."
)
if self.add_cache_headers and self.stream:
warning = " Use --no-stream for accurate caching costs."
self.usage_report = tokens_report + "\n" + cost_report + warning
return
if cache_hit_tokens and cache_write_tokens:
sep = "\n"
else:

View File

@@ -19,7 +19,7 @@ class EditBlockFencedPrompts(EditBlockPrompts):
Here are the *SEARCH/REPLACE* blocks:
{fence[0]}
{fence[0]}python
mathweb/flask/app.py
<<<<<<< SEARCH
from flask import Flask
@@ -29,7 +29,7 @@ from flask import Flask
>>>>>>> REPLACE
{fence[1]}
{fence[0]}
{fence[0]}python
mathweb/flask/app.py
<<<<<<< SEARCH
def factorial(n):
@@ -44,7 +44,7 @@ def factorial(n):
>>>>>>> REPLACE
{fence[1]}
{fence[0]}
{fence[0]}python
mathweb/flask/app.py
<<<<<<< SEARCH
return str(factorial(n))
@@ -68,7 +68,7 @@ mathweb/flask/app.py
Here are the *SEARCH/REPLACE* blocks:
{fence[0]}
{fence[0]}python
hello.py
<<<<<<< SEARCH
=======
@@ -79,7 +79,7 @@ def hello():
>>>>>>> REPLACE
{fence[1]}
{fence[0]}
{fence[0]}python
main.py
<<<<<<< SEARCH
def hello():
@@ -93,3 +93,50 @@ from hello import hello
""",
),
]
system_reminder = """# *SEARCH/REPLACE block* Rules:
Every *SEARCH/REPLACE block* must use this format:
1. The opening fence and code language, eg: {fence[0]}python
2. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.
3. The start of search block: <<<<<<< SEARCH
4. A contiguous chunk of lines to search for in the existing source code
5. The dividing line: =======
6. The lines to replace into the source code
7. The end of the replace block: >>>>>>> REPLACE
8. The closing fence: {fence[1]}
Use the *FULL* file path, as shown to you by the user.
{quad_backtick_reminder}
Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.
If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.
*SEARCH/REPLACE* blocks will *only* replace the first match occurrence.
Including multiple unique *SEARCH/REPLACE* blocks if needed.
Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.
Keep *SEARCH/REPLACE* blocks concise.
Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.
Include just the changing lines, and a few surrounding lines if needed for uniqueness.
Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.
Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!
To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.
Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.
If you want to put code in a new file, use a *SEARCH/REPLACE block* with:
- A new file path, including dir name if needed
- An empty `SEARCH` section
- The new file's contents in the `REPLACE` section
To rename files which have been added to the chat, use shell commands at the end of your response.
If the user just says something like "ok" or "go ahead" or "do that" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.
The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.
{lazy_prompt}
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
{shell_cmd_reminder}
"""

View File

@@ -93,7 +93,17 @@ class Commands:
weak_model=self.coder.main_model.weak_model.name,
)
models.sanity_check_models(self.io, model)
raise SwitchCoder(main_model=model)
# Check if the current edit format is the default for the old model
old_model_edit_format = self.coder.main_model.edit_format
current_edit_format = self.coder.edit_format
new_edit_format = current_edit_format
if current_edit_format == old_model_edit_format:
# If the user was using the old model's default, switch to the new model's default
new_edit_format = model.edit_format
raise SwitchCoder(main_model=model, edit_format=new_edit_format)
def cmd_editor_model(self, args):
"Switch the Editor Model to a new LLM"
@@ -1004,9 +1014,15 @@ class Commands:
dict(role="assistant", content="Ok."),
]
if add and exit_status != 0:
if add_on_nonzero_exit and exit_status != 0:
# Return the formatted output message for test failures
return msg
elif add and exit_status != 0:
self.io.placeholder = "What's wrong? Fix"
# Return None if output wasn't added or command succeeded
return None
def cmd_exit(self, args):
"Exit the application"
self.coder.event("exit", reason="/exit")
@@ -1496,11 +1512,11 @@ class Commands:
if not args.strip():
# Display current value if no args are provided
formatted_budget = model.get_thinking_tokens(model)
formatted_budget = model.get_thinking_tokens()
if formatted_budget is None:
self.io.tool_output("Thinking tokens are not currently set.")
else:
budget = model.extra_params["thinking"].get("budget_tokens")
budget = model.get_raw_thinking_tokens()
self.io.tool_output(
f"Current thinking token budget: {budget:,} tokens ({formatted_budget})."
)
@@ -1509,8 +1525,8 @@ class Commands:
value = args.strip()
model.set_thinking_tokens(value)
formatted_budget = model.get_thinking_tokens(model)
budget = model.extra_params["thinking"].get("budget_tokens")
formatted_budget = model.get_thinking_tokens()
budget = model.get_raw_thinking_tokens()
self.io.tool_output(f"Set thinking token budget to {budget:,} tokens ({formatted_budget}).")
self.io.tool_output()
@@ -1525,7 +1541,7 @@ class Commands:
if not args.strip():
# Display current value if no args are provided
reasoning_value = model.get_reasoning_effort(model)
reasoning_value = model.get_reasoning_effort()
if reasoning_value is None:
self.io.tool_output("Reasoning effort is not currently set.")
else:
@@ -1534,7 +1550,7 @@ class Commands:
value = args.strip()
model.set_reasoning_effort(value)
reasoning_value = model.get_reasoning_effort(model)
reasoning_value = model.get_reasoning_effort()
self.io.tool_output(f"Set reasoning effort to {reasoning_value}")
self.io.tool_output()

View File

@@ -83,4 +83,25 @@ class LiteLLMExceptions:
)
if "boto3" in str(ex):
return ExInfo("APIConnectionError", False, "You need to: pip install boto3")
if "OpenrouterException" in str(ex) and "'choices'" in str(ex):
return ExInfo(
"APIConnectionError",
True,
(
"OpenRouter or the upstream API provider is down, overloaded or rate"
" limiting your requests."
),
)
# Check for specific non-retryable APIError cases like insufficient credits
if ex.__class__ is litellm.APIError:
err_str = str(ex).lower()
if "insufficient credits" in err_str and '"code":402' in err_str:
return ExInfo(
"APIError",
False,
"Insufficient credits with the API provider. Please add credits.",
)
# Fall through to default APIError handling if not the specific credits error
return self.exceptions.get(ex.__class__, ExInfo(None, None, None))

View File

@@ -26,6 +26,7 @@ from prompt_toolkit.shortcuts import CompleteStyle, PromptSession
from prompt_toolkit.styles import Style
from pygments.lexers import MarkdownLexer, guess_lexer_for_filename
from pygments.token import Token
from rich.color import ColorParseError
from rich.columns import Columns
from rich.console import Console
from rich.markdown import Markdown
@@ -35,6 +36,7 @@ from rich.text import Text
from aider.mdstream import MarkdownStream
from .dump import dump # noqa: F401
from .editor import pipe_editor
from .utils import is_image_file
# Constants
@@ -360,6 +362,35 @@ class InputOutput:
self.file_watcher = file_watcher
self.root = root
# Validate color settings after console is initialized
self._validate_color_settings()
def _validate_color_settings(self):
    """Check each configured color string; disable any that Rich cannot parse."""
    # Attributes that may hold user-supplied color strings.
    attrs_to_check = (
        "user_input_color",
        "tool_output_color",
        "tool_error_color",
        "tool_warning_color",
        "assistant_output_color",
        "completion_menu_color",
        "completion_menu_bg_color",
        "completion_menu_current_color",
        "completion_menu_current_bg_color",
    )
    for name in attrs_to_check:
        value = getattr(self, name, None)
        if not value:
            # Unset or empty colors need no validation.
            continue
        try:
            # Constructing a style is how Rich validates a color string.
            RichStyle(color=value)
        except ColorParseError as e:
            self.console.print(
                "[bold red]Warning:[/bold red] Invalid configuration for"
                f" {name}: '{value}'. {e}. Disabling this color."
            )
            # Fall back to uncolored output for this attribute.
            setattr(self, name, None)
def _get_style(self):
style_dict = {}
if not self.pretty:
@@ -385,9 +416,9 @@ class InputOutput:
# Conditionally add 'completion-menu.completion.current' style
completion_menu_current_style = []
if self.completion_menu_current_bg_color:
completion_menu_current_style.append(f"bg:{self.completion_menu_current_bg_color}")
completion_menu_current_style.append(self.completion_menu_current_bg_color)
if self.completion_menu_current_color:
completion_menu_current_style.append(self.completion_menu_current_color)
completion_menu_current_style.append(f"bg:{self.completion_menu_current_color}")
if completion_menu_current_style:
style_dict["completion-menu.completion.current"] = " ".join(
completion_menu_current_style
@@ -557,6 +588,21 @@ class InputOutput:
"Navigate forward through history"
event.current_buffer.history_forward()
@kb.add("c-x", "c-e")
def _(event):
"Edit current input in external editor (like Bash)"
buffer = event.current_buffer
current_text = buffer.text
# Open the editor with the current text
edited_text = pipe_editor(input_data=current_text)
# Replace the buffer with the edited text, strip any trailing newlines
buffer.text = edited_text.rstrip("\n")
# Move cursor to the end of the text
buffer.cursor_position = len(buffer.text)
@kb.add("enter", eager=True, filter=~is_searching)
def _(event):
"Handle Enter key press"
@@ -917,6 +963,7 @@ class InputOutput:
if not isinstance(message, Text):
message = Text(message)
color = ensure_hash_prefix(color) if color else None
style = dict(style=color) if self.pretty and color else dict()
try:
self.console.print(message, **style)
@@ -947,7 +994,7 @@ class InputOutput:
style = dict()
if self.pretty:
if self.tool_output_color:
style["color"] = self.tool_output_color
style["color"] = ensure_hash_prefix(self.tool_output_color)
style["reverse"] = bold
style = RichStyle(**style)

View File

@@ -4,6 +4,7 @@ import subprocess
import sys
import traceback
import warnings
import shlex
from dataclasses import dataclass
from pathlib import Path
@@ -44,7 +45,7 @@ class Linter:
return fname
def run_cmd(self, cmd, rel_fname, code):
cmd += " " + rel_fname
cmd += " " + shlex.quote(rel_fname)
returncode = 0
stdout = ""

View File

@@ -30,6 +30,7 @@ from aider.history import ChatSummary
from aider.io import InputOutput
from aider.llm import litellm # noqa: F401; properly init litellm on launch
from aider.models import ModelSettings
from aider.onboarding import offer_openrouter_oauth, select_default_model
from aider.repo import ANY_GIT_ERROR, GitRepo
from aider.report import report_uncaught_exceptions
from aider.versioncheck import check_version, install_from_main_branch, install_upgrade
@@ -357,11 +358,21 @@ def register_models(git_root, model_settings_fname, io, verbose=False):
def load_dotenv_files(git_root, dotenv_fname, encoding="utf-8"):
# Standard .env file search path
dotenv_files = generate_search_path_list(
".env",
git_root,
dotenv_fname,
)
# Explicitly add the OAuth keys file to the beginning of the list
oauth_keys_file = Path.home() / ".aider" / "oauth-keys.env"
if oauth_keys_file.exists():
# Insert at the beginning so it's loaded first (and potentially overridden)
dotenv_files.insert(0, str(oauth_keys_file.resolve()))
# Remove duplicates if it somehow got included by generate_search_path_list
dotenv_files = list(dict.fromkeys(dotenv_files))
loaded = []
for fname in dotenv_files:
try:
@@ -751,26 +762,49 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
alias, model = parts
models.MODEL_ALIASES[alias.strip()] = model.strip()
if not args.model:
# Select model based on available API keys
model_key_pairs = [
("ANTHROPIC_API_KEY", "sonnet"),
("DEEPSEEK_API_KEY", "deepseek"),
("OPENROUTER_API_KEY", "openrouter/anthropic/claude-3.7-sonnet"),
("OPENAI_API_KEY", "gpt-4o"),
("GEMINI_API_KEY", "flash"),
]
selected_model_name = select_default_model(args, io, analytics)
if not selected_model_name:
# Error message and analytics event are handled within select_default_model
# It might have already offered OAuth if no model/keys were found.
# If it failed here, we exit.
return 1
args.model = selected_model_name # Update args with the selected model
for env_key, model_name in model_key_pairs:
if os.environ.get(env_key):
args.model = model_name
io.tool_warning(
f"Found {env_key} so using {model_name} since no --model was specified."
# Check if an OpenRouter model was selected/specified but the key is missing
if args.model.startswith("openrouter/") and not os.environ.get("OPENROUTER_API_KEY"):
io.tool_warning(
f"The specified model '{args.model}' requires an OpenRouter API key, which was not"
" found."
)
# Attempt OAuth flow because the specific model needs it
if offer_openrouter_oauth(io, analytics):
# OAuth succeeded, the key should now be in os.environ.
# Check if the key is now present after the flow.
if os.environ.get("OPENROUTER_API_KEY"):
io.tool_output(
"OpenRouter successfully connected."
) # Inform user connection worked
else:
# This case should ideally not happen if offer_openrouter_oauth succeeded
# but check defensively.
io.tool_error(
"OpenRouter authentication seemed successful, but the key is still missing."
)
break
if not args.model:
io.tool_error("You need to specify a --model and an --api-key to use.")
io.offer_url(urls.models_and_keys, "Open documentation url for more info?")
analytics.event(
"exit",
reason="OpenRouter key missing after successful OAuth for specified model",
)
return 1
else:
# OAuth failed or was declined by the user
io.tool_error(
f"Unable to proceed without an OpenRouter API key for model '{args.model}'."
)
io.offer_url(urls.models_and_keys, "Open documentation URL for more info?")
analytics.event(
"exit",
reason="OpenRouter key missing for specified model and OAuth failed/declined",
)
return 1
main_model = models.Model(
@@ -918,6 +952,9 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
else:
map_tokens = args.map_tokens
# Track auto-commits configuration
analytics.event("auto_commits", enabled=bool(args.auto_commits))
try:
coder = Coder.create(
main_model=main_model,
@@ -1064,6 +1101,9 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
io.tool_output(f"Cur working dir: {Path.cwd()}")
io.tool_output(f"Git working dir: {git_root}")
if args.stream and args.cache_prompts:
io.tool_warning("Cost estimates may be inaccurate when using streaming and caching.")
if args.load:
commands.cmd_load(args.load)

View File

@@ -3,9 +3,11 @@
import io
import time
from rich import box
from rich.console import Console
from rich.live import Live
from rich.markdown import CodeBlock, Markdown
from rich.markdown import CodeBlock, Heading, Markdown
from rich.panel import Panel
from rich.syntax import Syntax
from rich.text import Text
@@ -56,13 +58,34 @@ class NoInsetCodeBlock(CodeBlock):
yield syntax
class LeftHeading(Heading):
"""A heading class that renders left-justified."""
def __rich_console__(self, console, options):
text = self.text
text.justify = "left" # Override justification
if self.tag == "h1":
# Draw a border around h1s, but keep text left-aligned
yield Panel(
text,
box=box.HEAVY,
style="markdown.h1.border",
)
else:
# Styled text for h2 and beyond
if self.tag == "h2":
yield Text("") # Keep the blank line before h2
yield text
class NoInsetMarkdown(Markdown):
"""Markdown with code blocks that have no padding."""
"""Markdown with code blocks that have no padding and left-justified headings."""
elements = {
**Markdown.elements,
"fence": NoInsetCodeBlock,
"code_block": NoInsetCodeBlock,
"heading_open": LeftHeading,
}

View File

@@ -88,9 +88,14 @@ MODEL_ALIASES = {
"3": "gpt-3.5-turbo",
# Other models
"deepseek": "deepseek/deepseek-chat",
"r1": "deepseek/deepseek-reasoner",
"flash": "gemini/gemini-2.0-flash-exp",
"quasar": "openrouter/openrouter/quasar-alpha",
"r1": "deepseek/deepseek-reasoner",
"gemini-2.5-pro": "gemini/gemini-2.5-pro-exp-03-25",
"gemini": "gemini/gemini-2.5-pro-preview-03-25",
"gemini-exp": "gemini/gemini-2.5-pro-exp-03-25",
"grok3": "xai/grok-3-beta",
"optimus": "openrouter/openrouter/optimus-alpha",
}
# Model metadata loaded from resources and user's files.
@@ -687,23 +692,24 @@ class Model(ModelSettings):
else:
self.extra_params["thinking"] = {"type": "enabled", "budget_tokens": num_tokens}
def get_thinking_tokens(self, model):
def get_raw_thinking_tokens(self):
"""Get formatted thinking token budget if available"""
budget = None
if model.extra_params:
if self.extra_params:
# Check for OpenRouter reasoning format
if (
"reasoning" in model.extra_params
and "max_tokens" in model.extra_params["reasoning"]
):
budget = model.extra_params["reasoning"]["max_tokens"]
if "reasoning" in self.extra_params and "max_tokens" in self.extra_params["reasoning"]:
budget = self.extra_params["reasoning"]["max_tokens"]
# Check for standard thinking format
elif (
"thinking" in model.extra_params
and "budget_tokens" in model.extra_params["thinking"]
"thinking" in self.extra_params and "budget_tokens" in self.extra_params["thinking"]
):
budget = model.extra_params["thinking"]["budget_tokens"]
budget = self.extra_params["thinking"]["budget_tokens"]
return budget
def get_thinking_tokens(self):
budget = self.get_raw_thinking_tokens()
if budget is not None:
# Format as xx.yK for thousands, xx.yM for millions
@@ -721,14 +727,14 @@ class Model(ModelSettings):
return f"{value:.1f}k"
return None
def get_reasoning_effort(self, model):
def get_reasoning_effort(self):
"""Get reasoning effort value if available"""
if (
model.extra_params
and "extra_body" in model.extra_params
and "reasoning_effort" in model.extra_params["extra_body"]
self.extra_params
and "extra_body" in self.extra_params
and "reasoning_effort" in self.extra_params["extra_body"]
):
return model.extra_params["extra_body"]["reasoning_effort"]
return self.extra_params["extra_body"]["reasoning_effort"]
return None
def is_deepseek_r1(self):

428
aider/onboarding.py Normal file
View File

@@ -0,0 +1,428 @@
import base64
import hashlib
import http.server
import os
import secrets
import socketserver
import threading
import time
import webbrowser
from urllib.parse import parse_qs, urlparse
import requests
from aider import urls
from aider.io import InputOutput
def check_openrouter_tier(api_key):
    """
    Determine whether an OpenRouter account is on the free tier.

    Args:
        api_key: The OpenRouter API key to check.

    Returns:
        True when the account is on the free tier, False when it has paid.
        Defaults to True when the lookup fails for any reason.
    """
    url = "https://openrouter.ai/api/v1/auth/key"
    headers = {"Authorization": f"Bearer {api_key}"}
    try:
        resp = requests.get(url, headers=headers, timeout=5)
        resp.raise_for_status()
        payload = resp.json()
    except Exception:
        # Any failure (network, HTTP status, bad JSON) is treated as free tier.
        return True
    # Per the OpenRouter docs, 'is_free_tier' is true if the user never paid.
    return payload.get("data", {}).get("is_free_tier", True)
def try_to_select_default_model():
    """
    Pick a default model based on which provider API keys are present.

    OpenRouter keys get special handling: the account tier is checked so
    free-tier users are pointed at a free model.

    Returns:
        The chosen model name, or None when no known key is set.
    """
    openrouter_key = os.environ.get("OPENROUTER_API_KEY")
    if openrouter_key:
        # Free-tier accounts get the free Gemini model; paid get Sonnet.
        if check_openrouter_tier(openrouter_key):
            return "openrouter/google/gemini-2.5-pro-exp-03-25:free"
        return "openrouter/anthropic/claude-3.7-sonnet"

    # Ordered preference list: the first env var found wins.
    key_to_model = (
        ("ANTHROPIC_API_KEY", "sonnet"),
        ("DEEPSEEK_API_KEY", "deepseek"),
        ("OPENAI_API_KEY", "gpt-4o"),
        ("GEMINI_API_KEY", "gemini/gemini-2.5-pro-exp-03-25"),
        ("VERTEXAI_PROJECT", "vertex_ai/gemini-2.5-pro-exp-03-25"),
    )
    return next(
        (model for env_key, model in key_to_model if os.environ.get(env_key)),
        None,
    )
def offer_openrouter_oauth(io, analytics):
    """
    Offer the OpenRouter OAuth login flow when no API keys were found.

    Args:
        io: InputOutput object used to prompt and report to the user.
        analytics: Analytics object used to record flow events.

    Returns:
        True when a key was obtained via OAuth, False otherwise.
    """
    io.tool_output("OpenRouter provides free and paid access to many LLMs.")
    # confirm_ask copes with non-interactive sessions as well.
    wants_oauth = io.confirm_ask(
        "Login to OpenRouter or create a free account?",
        default="y",
    )
    if not wants_oauth:
        return False

    analytics.event("oauth_flow_initiated", provider="openrouter")
    openrouter_key = start_openrouter_oauth_flow(io, analytics)
    if openrouter_key:
        # Make the key visible to the rest of this process.
        os.environ["OPENROUTER_API_KEY"] = openrouter_key
        analytics.event("oauth_flow_success")
        return True

    # The flow failed or the user abandoned it; detailed errors were
    # already reported inside start_openrouter_oauth_flow.
    analytics.event("oauth_flow_failure")
    io.tool_error("OpenRouter authentication did not complete successfully.")
    return False
def select_default_model(args, io, analytics):
    """
    Resolve which model to use when none was given on the command line.

    Falls back to API-key based auto-selection, then offers the OpenRouter
    OAuth flow before giving up.

    Args:
        args: The command line arguments object.
        io: The InputOutput object for user interaction.
        analytics: The Analytics object for tracking events.

    Returns:
        The selected model name, or None when nothing suitable was found.
    """
    if args.model:
        # An explicit --model always wins.
        return args.model

    model = try_to_select_default_model()
    if model:
        io.tool_warning(f"Using {model} model with API key from environment.")
        analytics.event("auto_model_selection", model=model)
        return model

    io.tool_warning("No LLM model was specified and no API keys were provided.")
    # No usable keys: offer the OAuth flow, then re-check for a key.
    offer_openrouter_oauth(io, analytics)
    model = try_to_select_default_model()
    if model:
        return model

    io.offer_url(urls.models_and_keys, "Open documentation URL for more info?")
# Helper function to find an available port
def find_available_port(start_port=8484, end_port=8584):
    """Return the first free localhost port in [start_port, end_port], else None."""
    port = start_port
    while port <= end_port:
        try:
            # Binding succeeds only when the port is currently free.
            with socketserver.TCPServer(("localhost", port), None):
                return port
        except OSError:
            pass  # Port already in use; try the next one.
        port += 1
    return None
# PKCE code generation
def generate_pkce_codes():
    """Create a PKCE (verifier, challenge) pair for the OAuth flow.

    The challenge is the URL-safe base64 encoding of the SHA-256 digest of
    the verifier, with '=' padding stripped (RFC 7636 "S256" method).
    """
    code_verifier = secrets.token_urlsafe(64)
    digest = hashlib.sha256(code_verifier.encode("utf-8")).digest()
    code_challenge = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("utf-8")
    return code_verifier, code_challenge
# Function to exchange the authorization code for an API key
def exchange_code_for_key(code, code_verifier, io):
    """
    Trade an OAuth authorization code for an OpenRouter API key.

    Args:
        code: The authorization code returned to the OAuth callback.
        code_verifier: The PKCE verifier matching the challenge sent earlier.
        io: InputOutput object used to report errors.

    Returns:
        The API key string, or None on any failure.
    """
    payload = {
        "code": code,
        "code_verifier": code_verifier,
        "code_challenge_method": "S256",
    }
    # Exception handlers are ordered most-specific first: Timeout and
    # HTTPError are both subclasses of RequestException.
    try:
        response = requests.post(
            "https://openrouter.ai/api/v1/auth/keys",
            headers={"Content-Type": "application/json"},
            json=payload,
            timeout=30,  # Don't hang forever on an unresponsive endpoint
        )
        response.raise_for_status()  # Surface 4xx/5xx as HTTPError
        api_key = response.json().get("key")
        if api_key:
            return api_key
        io.tool_error("Error: 'key' not found in OpenRouter response.")
        io.tool_error(f"Response: {response.text}")
        return None
    except requests.exceptions.Timeout:
        io.tool_error("Error: Request to OpenRouter timed out during code exchange.")
        return None
    except requests.exceptions.HTTPError as e:
        io.tool_error(
            "Error exchanging code for OpenRouter key:"
            f" {e.response.status_code} {e.response.reason}"
        )
        io.tool_error(f"Response: {e.response.text}")
        return None
    except requests.exceptions.RequestException as e:
        io.tool_error(f"Error exchanging code for OpenRouter key: {e}")
        return None
    except Exception as e:
        io.tool_error(f"Unexpected error during code exchange: {e}")
        return None
# Function to start the OAuth flow
def start_openrouter_oauth_flow(io, analytics):
    """Initiates the OpenRouter OAuth PKCE flow using a local server.

    Starts a temporary localhost HTTP server to receive the OAuth callback,
    opens the browser to OpenRouter's auth page, waits up to 5 minutes for
    the user to approve, then exchanges the returned code for an API key.
    On success the key is set in os.environ and appended to
    ~/.aider/oauth-keys.env.

    Returns the API key string, or None on failure or interruption.
    """
    port = find_available_port()
    if not port:
        io.tool_error("Could not find an available port between 8484 and 8584.")
        io.tool_error("Please ensure a port in this range is free, or configure manually.")
        return None
    callback_url = f"http://localhost:{port}/callback/aider"
    # Shared state between this thread and the handler/server thread.
    auth_code = None
    server_error = None
    server_started = threading.Event()
    shutdown_server = threading.Event()

    class OAuthCallbackHandler(http.server.SimpleHTTPRequestHandler):
        def do_GET(self):
            nonlocal auth_code, server_error
            parsed_path = urlparse(self.path)
            if parsed_path.path == "/callback/aider":
                query_params = parse_qs(parsed_path.query)
                if "code" in query_params:
                    auth_code = query_params["code"][0]
                    self.send_response(200)
                    self.send_header("Content-type", "text/html")
                    self.end_headers()
                    self.wfile.write(
                        b"<html><body><h1>Success!</h1>"
                        b"<p>Aider has received the authentication code. "
                        b"You can close this browser tab.</p></body></html>"
                    )
                    # Signal the main thread to shut down the server
                    shutdown_server.set()
                else:
                    # Redirect to aider website if 'code' is missing (e.g., user visited manually)
                    self.send_response(302)  # Found (temporary redirect)
                    self.send_header("Location", urls.website)
                    self.end_headers()
                    # No need to set server_error, just redirect.
                    # Do NOT shut down the server here; wait for timeout or success.
            else:
                # Redirect anything else (e.g., favicon.ico) to the main website as well
                self.send_response(302)
                self.send_header("Location", urls.website)
                self.end_headers()
                # NOTE(review): writing a body after a 302 redirect is unusual —
                # browsers follow the Location header and ignore it; confirm intent.
                self.wfile.write(b"Not Found")

        def log_message(self, format, *args):
            # Suppress server logging to keep terminal clean
            pass

    def run_server():
        # Runs in a daemon thread; reports failures via server_error.
        nonlocal server_error
        try:
            with socketserver.TCPServer(("localhost", port), OAuthCallbackHandler) as httpd:
                io.tool_output(f"Temporary server listening on {callback_url}", log_only=True)
                server_started.set()  # Signal that the server is ready
                # Wait until shutdown is requested or timeout occurs (handled by main thread)
                while not shutdown_server.is_set():
                    httpd.handle_request()  # Handle one request at a time
                    # Add a small sleep to prevent busy-waiting if needed,
                    # though handle_request should block appropriately.
                    time.sleep(0.1)
                io.tool_output("Shutting down temporary server.", log_only=True)
        except Exception as e:
            server_error = f"Failed to start or run temporary server: {e}"
            server_started.set()  # Signal even if failed, error will be checked
            shutdown_server.set()  # Ensure shutdown logic proceeds

    server_thread = threading.Thread(target=run_server, daemon=True)
    server_thread.start()
    # Wait briefly for the server to start, or for an error
    if not server_started.wait(timeout=5):
        io.tool_error("Temporary authentication server failed to start in time.")
        shutdown_server.set()  # Ensure thread exits if it eventually starts
        server_thread.join(timeout=1)
        return None
    # Check if server failed during startup
    if server_error:
        io.tool_error(server_error)
        shutdown_server.set()  # Ensure thread exits
        server_thread.join(timeout=1)
        return None
    # Generate codes and URL
    code_verifier, code_challenge = generate_pkce_codes()
    auth_url_base = "https://openrouter.ai/auth"
    auth_params = {
        "callback_url": callback_url,
        "code_challenge": code_challenge,
        "code_challenge_method": "S256",
    }
    auth_url = f"{auth_url_base}?{'&'.join(f'{k}={v}' for k, v in auth_params.items())}"
    io.tool_output("\nPlease open this URL in your browser to connect Aider with OpenRouter:")
    io.tool_output()
    print(auth_url)
    MINUTES = 5
    io.tool_output(f"\nWaiting up to {MINUTES} minutes for you to finish in the browser...")
    io.tool_output("Use Control-C to interrupt.")
    try:
        webbrowser.open(auth_url)
    except Exception:
        # Opening the browser is best-effort; the URL was already printed.
        pass
    # Wait for the callback to set the auth_code or for timeout/error
    interrupted = False
    try:
        shutdown_server.wait(timeout=MINUTES * 60)  # Convert minutes to seconds
    except KeyboardInterrupt:
        io.tool_warning("\nOAuth flow interrupted.")
        analytics.event("oauth_flow_failed", provider="openrouter", reason="user_interrupt")
        interrupted = True
        # Ensure the server thread is signaled to shut down
        shutdown_server.set()
    # Join the server thread to ensure it's cleaned up
    server_thread.join(timeout=1)
    if interrupted:
        return None  # Return None if interrupted by user
    if server_error:
        io.tool_error(f"Authentication failed: {server_error}")
        analytics.event("oauth_flow_failed", provider="openrouter", reason=server_error)
        return None
    if not auth_code:
        # Timed out without ever receiving a code on the callback.
        io.tool_error("Authentication with OpenRouter failed.")
        analytics.event("oauth_flow_failed", provider="openrouter")
        return None
    io.tool_output("Completing authentication...")
    analytics.event("oauth_flow_code_received", provider="openrouter")
    # Exchange code for key
    api_key = exchange_code_for_key(auth_code, code_verifier, io)
    if api_key:
        # Set env var for the current session immediately
        os.environ["OPENROUTER_API_KEY"] = api_key
        # Save the key to the oauth-keys.env file
        try:
            config_dir = os.path.expanduser("~/.aider")
            os.makedirs(config_dir, exist_ok=True)
            key_file = os.path.join(config_dir, "oauth-keys.env")
            with open(key_file, "a", encoding="utf-8") as f:
                f.write(f'OPENROUTER_API_KEY="{api_key}"\n')
            io.tool_warning("Aider will load the OpenRouter key automatically in future sessions.")
            io.tool_output()
            analytics.event("oauth_flow_success", provider="openrouter")
            return api_key
        except Exception as e:
            io.tool_error(f"Successfully obtained key, but failed to save it to file: {e}")
            io.tool_warning("Set OPENROUTER_API_KEY environment variable for this session only.")
            # Still return the key for the current session even if saving failed
            analytics.event("oauth_flow_save_failed", provider="openrouter", reason=str(e))
            return api_key
    else:
        io.tool_error("Authentication with OpenRouter failed.")
        analytics.event("oauth_flow_failed", provider="openrouter", reason="code_exchange_failed")
        return None
# Dummy Analytics class for testing
class DummyAnalytics:
    """No-op stand-in for the real Analytics object, used by the test main()."""

    def event(self, *args, **kwargs):
        # Intentionally discard all events; uncomment to inspect them:
        # print(f"Analytics Event: {args} {kwargs}")
        return None
def main():
    """Manually exercise the OpenRouter OAuth flow from the command line."""
    print("Starting OpenRouter OAuth flow test...")
    # Real IO object so prompts and colors behave as they would in normal use.
    io = InputOutput(
        pretty=True,
        yes=False,
        input_history_file=None,
        chat_history_file=None,
        tool_output_color="BLUE",
        tool_error_color="RED",
    )
    analytics = DummyAnalytics()
    # A pre-existing key doesn't block the flow, but it is worth flagging
    # since the flow is normally triggered only when no key is present.
    if "OPENROUTER_API_KEY" in os.environ:
        print("Warning: OPENROUTER_API_KEY is already set in environment.")
    api_key = start_openrouter_oauth_flow(io, analytics)
    if api_key:
        print("\nOAuth flow completed successfully!")
        # Only show a short prefix; never print the full key.
        print(f"Obtained API Key (first 5 chars): {api_key[:5]}...")
    else:
        print("\nOAuth flow failed or was cancelled.")
    print("\nOpenRouter OAuth flow test finished.")


if __name__ == "__main__":
    main()

View File

@@ -15,7 +15,7 @@ Use these for <type>: fix, feat, build, chore, ci, docs, style, refactor, perf,
Ensure the commit message:
- Starts with the appropriate prefix.
- Is in the imperative mood (e.g., \"Add feature\" not \"Added feature\" or \"Adding feature\").
- Is in the imperative mood (e.g., \"add feature\" not \"added feature\" or \"adding feature\").
- Does not exceed 72 characters.
Reply only with the one-line commit message, without any additional text, explanations, \

View File

@@ -0,0 +1,65 @@
; Definitions
(package_clause
name: (package_identifier) @name.definition.module) @definition.module
(trait_definition
name: (identifier) @name.definition.interface) @definition.interface
(enum_definition
name: (identifier) @name.definition.enum) @definition.enum
(simple_enum_case
name: (identifier) @name.definition.class) @definition.class
(full_enum_case
name: (identifier) @name.definition.class) @definition.class
(class_definition
name: (identifier) @name.definition.class) @definition.class
(object_definition
name: (identifier) @name.definition.object) @definition.object
(function_definition
name: (identifier) @name.definition.function) @definition.function
(val_definition
pattern: (identifier) @name.definition.variable) @definition.variable
(given_definition
name: (identifier) @name.definition.variable) @definition.variable
(var_definition
pattern: (identifier) @name.definition.variable) @definition.variable
(val_declaration
name: (identifier) @name.definition.variable) @definition.variable
(var_declaration
name: (identifier) @name.definition.variable) @definition.variable
(type_definition
name: (type_identifier) @name.definition.type) @definition.type
(class_parameter
name: (identifier) @name.definition.property) @definition.property
; References
(call_expression
(identifier) @name.reference.call) @reference.call
(instance_expression
(type_identifier) @name.reference.interface) @reference.interface
(instance_expression
(generic_type
(type_identifier) @name.reference.interface)) @reference.interface
(extends_clause
(type_identifier) @name.reference.class) @reference.class
(extends_clause
(generic_type
(type_identifier) @name.reference.class)) @reference.class

View File

@@ -9,6 +9,7 @@ try:
git.exc.ODBError,
git.exc.GitError,
git.exc.InvalidGitRepositoryError,
git.exc.GitCommandNotFound,
]
except ImportError:
git = None
@@ -293,13 +294,19 @@ class GitRepo:
else:
try:
iterator = commit.tree.traverse()
blob = None # Initialize blob
while True:
try:
blob = next(iterator)
if blob.type == "blob": # blob is a file
files.add(blob.path)
except IndexError:
self.io.tool_warning(f"GitRepo: read error skipping {blob.path}")
# Handle potential index error during tree traversal
# without relying on potentially unassigned 'blob'
self.io.tool_warning(
"GitRepo: Index error encountered while reading git tree object."
" Skipping."
)
continue
except StopIteration:
break

View File

@@ -398,13 +398,30 @@ class RepoMap:
# dump(fname)
rel_fname = self.get_rel_fname(fname)
current_pers = 0.0 # Start with 0 personalization score
if fname in chat_fnames:
personalization[rel_fname] = personalize
current_pers += personalize
chat_rel_fnames.add(rel_fname)
if rel_fname in mentioned_fnames:
personalization[rel_fname] = personalize
# Use max to avoid double counting if in chat_fnames and mentioned_fnames
current_pers = max(current_pers, personalize)
# Check path components against mentioned_idents
path_obj = Path(rel_fname)
path_components = set(path_obj.parts)
basename_with_ext = path_obj.name
basename_without_ext, _ = os.path.splitext(basename_with_ext)
components_to_check = path_components.union({basename_with_ext, basename_without_ext})
matched_idents = components_to_check.intersection(mentioned_idents)
if matched_idents:
# Add personalization *once* if any path component matches a mentioned ident
current_pers += personalize
if current_pers > 0:
personalization[rel_fname] = current_pers # Assign the final calculated value
tags = list(self.get_tags(fname, rel_fname))
if tags is None:

View File

@@ -63,6 +63,33 @@
//"supports_tool_choice": true,
"supports_prompt_caching": true
},
"openrouter/deepseek/deepseek-chat-v3-0324": {
"max_tokens": 8192,
"max_input_tokens": 64000,
"max_output_tokens": 8192,
"input_cost_per_token": 0.00000055,
"input_cost_per_token_cache_hit": 0.00000014,
"cache_read_input_token_cost": 0.00000014,
"cache_creation_input_token_cost": 0.0,
"output_cost_per_token": 0.00000219,
"litellm_provider": "openrouter",
"mode": "chat",
//"supports_function_calling": true,
"supports_assistant_prefill": true,
//"supports_tool_choice": true,
"supports_prompt_caching": true
},
"openrouter/deepseek/deepseek-chat-v3-0324:free": {
"max_tokens": 131072,
"max_input_tokens": 131072,
"max_output_tokens": 131072,
"input_cost_per_token": 0,
"output_cost_per_token": 0,
"litellm_provider": "openrouter",
"supports_prompt_caching": true,
"mode": "chat",
"supports_tool_choice": true
},
"fireworks_ai/accounts/fireworks/models/deepseek-r1": {
"max_tokens": 160000,
"max_input_tokens": 128000,
@@ -81,6 +108,15 @@
"output_cost_per_token": 0.0000009,
"mode": "chat",
},
"fireworks_ai/accounts/fireworks/models/deepseek-v3-0324": {
"max_tokens": 160000,
"max_input_tokens": 100000,
"max_output_tokens": 8192,
"litellm_provider": "fireworks_ai",
"input_cost_per_token": 0.0000009,
"output_cost_per_token": 0.0000009,
"mode": "chat",
},
"o3-mini": {
"max_tokens": 100000,
"max_input_tokens": 200000,
@@ -129,6 +165,26 @@
"supports_system_messages": true,
"supports_response_schema": true
},
"openrouter/openrouter/quasar-alpha": {
"max_input_tokens": 1000000,
"max_output_tokens": 32000,
"input_cost_per_token": 0.0,
"output_cost_per_token": 0.0,
"litellm_provider": "openrouter",
"mode": "chat",
"supports_vision": true,
"supports_function_calling": true,
"supports_system_messages": true,
"supports_prompt_caching": true
},
"openrouter/openrouter/optimus-alpha": {
"max_input_tokens": 1000000,
"max_output_tokens": 32000,
"input_cost_per_token": 0.0,
"output_cost_per_token": 0.0,
"litellm_provider": "openrouter",
"mode": "chat"
},
"openrouter/openai/gpt-4o-mini": {
"max_tokens": 16384,
"max_input_tokens": 128000,
@@ -244,7 +300,7 @@
"gemini/gemini-2.5-pro-exp-03-25": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
"max_output_tokens": 8192,
"max_output_tokens": 64000,
"max_images_per_prompt": 3000,
"max_videos_per_prompt": 10,
"max_video_length": 1,
@@ -278,10 +334,46 @@
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"gemini/gemini-2.5-pro-preview-03-25": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
"max_output_tokens": 64000,
"max_images_per_prompt": 3000,
"max_videos_per_prompt": 10,
"max_video_length": 1,
"max_audio_length_hours": 8.4,
"max_audio_per_prompt": 1,
"max_pdf_size_mb": 30,
"input_cost_per_image": 0,
"input_cost_per_video_per_second": 0,
"input_cost_per_audio_per_second": 0,
"input_cost_per_token": 0.00000125,
"input_cost_per_character": 0,
"input_cost_per_token_above_128k_tokens": 0,
"input_cost_per_character_above_128k_tokens": 0,
"input_cost_per_image_above_128k_tokens": 0,
"input_cost_per_video_per_second_above_128k_tokens": 0,
"input_cost_per_audio_per_second_above_128k_tokens": 0,
"output_cost_per_token": 0.000010,
"output_cost_per_character": 0,
"output_cost_per_token_above_128k_tokens": 0,
"output_cost_per_character_above_128k_tokens": 0,
"litellm_provider": "gemini",
"mode": "chat",
"supports_system_messages": true,
"supports_function_calling": true,
"supports_vision": true,
"supports_audio_input": true,
"supports_video_input": true,
"supports_pdf_input": true,
"supports_response_schema": true,
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"vertex_ai/gemini-2.5-pro-exp-03-25": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
"max_output_tokens": 8192,
"max_output_tokens": 64000,
"max_images_per_prompt": 3000,
"max_videos_per_prompt": 10,
"max_video_length": 1,
@@ -314,10 +406,82 @@
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"vertex_ai/gemini-2.5-pro-preview-03-25": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
"max_output_tokens": 64000,
"max_images_per_prompt": 3000,
"max_videos_per_prompt": 10,
"max_video_length": 1,
"max_audio_length_hours": 8.4,
"max_audio_per_prompt": 1,
"max_pdf_size_mb": 30,
"input_cost_per_image": 0,
"input_cost_per_video_per_second": 0,
"input_cost_per_audio_per_second": 0,
"input_cost_per_token": 0.00000125,
"input_cost_per_character": 0,
"input_cost_per_token_above_128k_tokens": 0,
"input_cost_per_character_above_128k_tokens": 0,
"input_cost_per_image_above_128k_tokens": 0,
"input_cost_per_video_per_second_above_128k_tokens": 0,
"input_cost_per_audio_per_second_above_128k_tokens": 0,
"output_cost_per_token": 0.000010,
"output_cost_per_character": 0,
"output_cost_per_token_above_128k_tokens": 0,
"output_cost_per_character_above_128k_tokens": 0,
"litellm_provider": "vertex_ai-language-models",
"mode": "chat",
"supports_system_messages": true,
"supports_function_calling": true,
"supports_vision": true,
"supports_audio_input": true,
"supports_video_input": true,
"supports_pdf_input": true,
"supports_response_schema": true,
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"openrouter/google/gemini-2.5-pro-preview-03-25": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
"max_output_tokens": 64000,
"max_images_per_prompt": 3000,
"max_videos_per_prompt": 10,
"max_video_length": 1,
"max_audio_length_hours": 8.4,
"max_audio_per_prompt": 1,
"max_pdf_size_mb": 30,
"input_cost_per_image": 0,
"input_cost_per_video_per_second": 0,
"input_cost_per_audio_per_second": 0,
"input_cost_per_token": 0.00000125,
"input_cost_per_character": 0,
"input_cost_per_token_above_128k_tokens": 0,
"input_cost_per_character_above_128k_tokens": 0,
"input_cost_per_image_above_128k_tokens": 0,
"input_cost_per_video_per_second_above_128k_tokens": 0,
"input_cost_per_audio_per_second_above_128k_tokens": 0,
"output_cost_per_token": 0.000010,
"output_cost_per_character": 0,
"output_cost_per_token_above_128k_tokens": 0,
"output_cost_per_character_above_128k_tokens": 0,
"litellm_provider": "vertex_ai-language-models",
"mode": "chat",
"supports_system_messages": true,
"supports_function_calling": true,
"supports_vision": true,
"supports_audio_input": true,
"supports_video_input": true,
"supports_pdf_input": true,
"supports_response_schema": true,
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"openrouter/google/gemini-2.5-pro-exp-03-25:free": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
"max_output_tokens": 8192,
"max_output_tokens": 64000,
"max_images_per_prompt": 3000,
"max_videos_per_prompt": 10,
"max_video_length": 1,
@@ -328,9 +492,9 @@
"input_cost_per_video_per_second": 0,
"input_cost_per_audio_per_second": 0,
"input_cost_per_token": 0,
"input_cost_per_character": 0,
"input_cost_per_token_above_128k_tokens": 0,
"input_cost_per_character_above_128k_tokens": 0,
"input_cost_per_character": 0,
"input_cost_per_token_above_128k_tokens": 0,
"input_cost_per_character_above_128k_tokens": 0,
"input_cost_per_image_above_128k_tokens": 0,
"input_cost_per_video_per_second_above_128k_tokens": 0,
"input_cost_per_audio_per_second_above_128k_tokens": 0,
@@ -350,4 +514,59 @@
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"openrouter/x-ai/grok-3-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
"max_output_tokens": 131072,
"input_cost_per_token": 0.000003,
"output_cost_per_token": 0.000015,
"litellm_provider": "openrouter",
"mode": "chat"
},
"xai/grok-3-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
"max_output_tokens": 131072,
"input_cost_per_token": 0.000003,
"output_cost_per_token": 0.000015,
"litellm_provider": "xai",
"mode": "chat"
},
"openrouter/x-ai/grok-3-mini-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
"max_output_tokens": 131072,
"input_cost_per_token": 0.0000003,
"output_cost_per_token": 0.0000005,
"litellm_provider": "openrouter",
"mode": "chat"
},
"xai/grok-3-mini-beta": {
"max_tokens": 131072,
"max_input_tokens": 131072,
"max_output_tokens": 131072,
"input_cost_per_token": 0.0000003,
"output_cost_per_token": 0.0000005,
"litellm_provider": "xai",
"mode": "chat"
},
"openrouter/google/gemini-2.0-flash-exp:free": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
"max_output_tokens": 8192,
"max_images_per_prompt": 3000,
"max_videos_per_prompt": 10,
"max_video_length": 1,
"max_audio_length_hours": 8.4,
"max_audio_per_prompt": 1,
"max_pdf_size_mb": 30,
"litellm_provider": "openrouter",
"mode": "chat",
"supports_system_messages": true,
"supports_function_calling": true,
"supports_vision": true,
"supports_response_schema": true,
"supports_audio_output": true,
"supports_tool_choice": true
},
}

View File

@@ -583,6 +583,16 @@
extra_params:
max_tokens: 8192
caches_by_default: true
- name: openrouter/deepseek/deepseek-chat-v3-0324:free
edit_format: diff
weak_model_name: openrouter/deepseek/deepseek-chat-v3-0324:free
use_repo_map: true
examples_as_sys_msg: true
caches_by_default: true
use_temperature: false
editor_model_name: openrouter/deepseek/deepseek-chat-v3-0324:free
editor_edit_format: editor-diff
use_temperature: false
editor_model_name: openrouter/deepseek/deepseek-r1:free
editor_edit_format: editor-diff
@@ -658,6 +668,15 @@
reminder: sys
examples_as_sys_msg: true
- name: openrouter/deepseek/deepseek-chat-v3-0324
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
- name: openrouter/openai/gpt-4o
edit_format: diff
weak_model_name: openrouter/openai/gpt-4o-mini
@@ -798,7 +817,7 @@
use_temperature: false
editor_model_name: openrouter/deepseek/deepseek-chat
editor_edit_format: editor-diff
- name: fireworks_ai/accounts/fireworks/models/deepseek-r1
edit_format: diff
weak_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
@@ -819,6 +838,14 @@
extra_params:
max_tokens: 128000
- name: fireworks_ai/accounts/fireworks/models/deepseek-v3-0324
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 160000
- name: openai/o3-mini
edit_format: diff
weak_model_name: gpt-4o-mini
@@ -828,7 +855,7 @@
editor_edit_format: editor-diff
system_prompt_prefix: "Formatting re-enabled. "
accepts_settings: ["reasoning_effort"]
- name: o3-mini
edit_format: diff
weak_model_name: gpt-4o-mini
@@ -878,7 +905,7 @@
examples_as_sys_msg: true
editor_model_name: gpt-4o
editor_edit_format: editor-diff
- name: openai/gpt-4.5-preview
edit_format: diff
weak_model_name: gpt-4o-mini
@@ -923,22 +950,73 @@
- name: gemini/gemma-3-27b-it
use_system_prompt: false
- name: openrouter/google/gemma-3-27b-it:free
use_system_prompt: false
- name: openrouter/google/gemma-3-27b-it
use_system_prompt: false
- name: gemini/gemini-2.5-pro-preview-03-25
edit_format: diff-fenced
use_repo_map: true
weak_model_name: gemini/gemini-2.0-flash
- name: gemini/gemini-2.5-pro-exp-03-25
edit_format: diff-fenced
use_repo_map: true
weak_model_name: gemini/gemini-2.0-flash
- name: openrouter/google/gemini-2.5-pro-exp-03-25:free
edit_format: diff-fenced
use_repo_map: true
weak_model_name: openrouter/google/gemini-2.0-flash-exp:free
- name: vertex_ai/gemini-2.5-pro-exp-03-25
edit_format: diff-fenced
use_repo_map: true
# Need metadata for this one...
#weak_model_name: vertex_ai/gemini-2.0-flash
- name: vertex_ai/gemini-2.5-pro-preview-03-25
edit_format: diff-fenced
use_repo_map: true
# Need metadata for this one...
#weak_model_name: vertex_ai/gemini-2.0-flash
- name: openrouter/openrouter/quasar-alpha
use_repo_map: true
edit_format: diff
examples_as_sys_msg: true
- name: openrouter/x-ai/grok-3-beta
use_repo_map: true
edit_format: diff
- name: xai/grok-3-beta
use_repo_map: true
edit_format: diff
- name: openrouter/x-ai/grok-3-mini-beta
use_repo_map: true
edit_format: whole
accepts_settings:
- reasoning_effort
#extra_params:
# extra_body:
# reasoning_effort: high
- name: xai/grok-3-mini-beta
use_repo_map: true
edit_format: whole
accepts_settings:
- reasoning_effort
#extra_params:
# extra_body:
# reasoning_effort: low
- name: openrouter/openrouter/optimus-alpha
use_repo_map: true
edit_format: diff
examples_as_sys_msg: true

View File

@@ -159,7 +159,8 @@ class Scraper:
try:
response = page.goto(url, wait_until="networkidle", timeout=5000)
except PlaywrightTimeoutError:
self.print_error(f"Timeout while loading {url}")
print(f"Page didn't quiesce, scraping content anyway: {url}")
response = None
except PlaywrightError as e:
self.print_error(f"Error navigating to {url}: {str(e)}")
return None, None

View File

@@ -308,7 +308,11 @@ def find_common_root(abs_fnames):
except OSError:
pass
return safe_abs_path(os.getcwd())
try:
return safe_abs_path(os.getcwd())
except FileNotFoundError:
# Fallback if cwd is deleted
return "."
def format_tokens(count):

View File

@@ -64,7 +64,7 @@ class FileWatcher:
"""Watches source files for changes and AI comments"""
# Compiled regex pattern for AI comments
ai_comment_pattern = re.compile(r"(?:#|//|--) *(ai\b.*|ai\b.*|.*\bai[?!]?) *$", re.IGNORECASE)
ai_comment_pattern = re.compile(r"(?:#|//|--|;+) *(ai\b.*|ai\b.*|.*\bai[?!]?) *$", re.IGNORECASE)
def __init__(self, coder, gitignores=None, verbose=False, analytics=None, root=None):
self.coder = coder
@@ -262,7 +262,7 @@ class FileWatcher:
line_nums.append(i)
comments.append(comment)
comment = comment.lower()
comment = comment.lstrip("/#-")
comment = comment.lstrip("/#-;") # Added semicolon for Lisp comments
comment = comment.strip()
if comment.startswith("ai!") or comment.endswith("ai!"):
has_action = "!"

View File

@@ -24,6 +24,95 @@ cog.out(text)
]]]-->
### main branch
- Commit messages generated by aider are no longer forced to be entirely lowercase, by Peter Hadlaw.
- Updated default settings for Grok models.
- Aider wrote 64% of the code in this release.
### Aider v0.81.2
- Add support for `xai/grok-3-beta`, `xai/grok-3-mini-beta`, `openrouter/x-ai/grok-3-beta`, `openrouter/x-ai/grok-3-mini-beta`, and `openrouter/openrouter/optimus-alpha` models.
- Add alias "grok3" for `xai/grok-3-beta`.
- Add alias "optimus" for `openrouter/openrouter/optimus-alpha`.
- Fix URL extraction from error messages.
- Allow adding files by full path even if a file with the same basename is already in the chat.
- Fix quoting of values containing '#' in the sample `aider.conf.yml`.
- Add support for Fireworks AI model 'deepseek-v3-0324', by Felix Lisczyk.
- Commit messages generated by aider are now lowercase, by Anton Ödman.
- Aider wrote 64% of the code in this release.
### Aider v0.81.1
- Added support for the `gemini/gemini-2.5-pro-preview-03-25` model.
- Updated the `gemini` alias to point to `gemini/gemini-2.5-pro-preview-03-25`.
- Added the `gemini-exp` alias for `gemini/gemini-2.5-pro-exp-03-25`.
- Aider wrote 87% of the code in this release.
### Aider v0.81.0
- Added support for the `openrouter/openrouter/quasar-alpha` model.
- Run with `aider --model quasar`
- Offer OpenRouter OAuth authentication if an OpenRouter model is specified but the API key is missing.
- Prevent retrying API calls when the provider reports insufficient credits.
- Improve URL detection to exclude trailing double quotes.
- Aider wrote 86% of the code in this release.
### Aider v0.80.4
- Bumped deps to pickup litellm change to properly display the root cause of OpenRouter "choices" errors.
### Aider v0.80.3
- Improve error message for OpenRouter API connection issues to mention potential rate limiting or upstream provider issues.
- Configure weak models (`gemini/gemini-2.0-flash` and `openrouter/google/gemini-2.0-flash-exp:free`) for Gemini 2.5 Pro models.
- Add model metadata for `openrouter/google/gemini-2.0-flash-exp:free`.
### Aider v0.80.2
- Bumped deps.
### Aider v0.80.1
- Updated deps for yanked fsspec and aiohttp packages #3699
- Removed redundant dependency check during OpenRouter OAuth flow, by Claudia Pellegrino.
### Aider v0.80.0
- OpenRouter OAuth integration:
- Offer to OAuth against OpenRouter if no model and keys are provided.
- Select OpenRouter default model based on free/paid tier status if `OPENROUTER_API_KEY` is set and no model is specified.
- Prioritize `gemini/gemini-2.5-pro-exp-03-25` if `GEMINI_API_KEY` is set, and `vertex_ai/gemini-2.5-pro-exp-03-25` if `VERTEXAI_PROJECT` is set, when no model is specified.
- Validate user-configured color settings on startup and warn/disable invalid ones.
- Warn at startup if `--stream` and `--cache-prompts` are used together, as cost estimates may be inaccurate.
- Boost repomap ranking for files whose path components match identifiers mentioned in the chat.
- Change web scraping timeout from an error to a warning, allowing scraping to continue with potentially incomplete content.
- Left-align markdown headings in the terminal output, by Peter Schilling.
- Update edit format to the new model's default when switching models with `/model`, if the user was using the old model's default format.
- Add `Ctrl-X Ctrl-E` keybinding to edit the current input buffer in an external editor, by Matteo Landi.
- Fix linting errors for filepaths containing shell metacharacters, by Mir Adnan ALI.
- Add the `openrouter/deepseek-chat-v3-0324:free` model.
- Add repomap support for the Scala language, by Vasil Markoukin.
- Fixed bug in `/run` that was preventing auto-testing.
- Fix bug preventing `UnboundLocalError` during git tree traversal.
- Handle `GitCommandNotFound` error if git is not installed or not in PATH.
- Handle `FileNotFoundError` if the current working directory is deleted while aider is running.
- Fix completion menu current item color styling, by Andrey Ivanov.
- Aider wrote 87% of the code in this release.
### Aider v0.79.2
- Added 'gemini' alias for gemini-2.5-pro model.
- Updated Gemini 2.5 Pro max output tokens to 64k.
- Added support for Lisp-style semicolon comments in file watcher, by Matteo Landi.
- Added OpenRouter API error detection and retries.
- Added openrouter/deepseek-chat-v3-0324 model.
- Aider wrote 93% of the code in this release.
### Aider v0.79.1
- Improved model listing to include all models in fuzzy matching, including those provided by aider (not litellm).
### Aider v0.79.0
- Added support for Gemini 2.5 Pro models.

View File

@@ -4319,3 +4319,132 @@
Paul Gauthier (aider): 221
start_tag: v0.78.0
total_lines: 338
- aider_percentage: 86.86
aider_total: 1837
end_date: '2025-03-31'
end_tag: v0.80.0
file_counts:
aider/__init__.py:
Paul Gauthier: 1
aider/coders/base_coder.py:
Paul Gauthier: 2
aider/commands.py:
Paul Gauthier: 4
Paul Gauthier (aider): 20
aider/exceptions.py:
Paul Gauthier: 1
Paul Gauthier (aider): 3
aider/io.py:
Andrey Ivanov: 2
Matteo Landi (aider): 11
Paul Gauthier (aider): 38
aider/linter.py:
Mir Adnan ALI: 2
aider/main.py:
Paul Gauthier: 1
Paul Gauthier (aider): 21
aider/mdstream.py:
Peter Schilling (aider) (aider): 25
aider/models.py:
Paul Gauthier: 12
Paul Gauthier (aider): 9
aider/onboarding.py:
Paul Gauthier: 44
Paul Gauthier (aider): 389
aider/queries/tree-sitter-languages/scala-tags.scm:
Vasil Markoukin: 65
aider/repo.py:
Paul Gauthier: 1
Paul Gauthier (aider): 7
aider/repomap.py:
Paul Gauthier (aider): 19
aider/resources/model-settings.yml:
Paul Gauthier (aider): 13
aider/scrape.py:
Paul Gauthier: 1
Paul Gauthier (aider): 1
aider/utils.py:
Paul Gauthier (aider): 5
aider/watch.py:
Matteo Landi (aider): 2
aider/website/_includes/leaderboard.js:
Paul Gauthier: 1
Paul Gauthier (aider): 2
aider/website/docs/leaderboards/index.md:
Paul Gauthier: 1
aider/website/index.html:
Paul Gauthier: 51
Paul Gauthier (aider): 175
scripts/30k-image.py:
Paul Gauthier: 8
Paul Gauthier (aider): 227
scripts/homepage.py:
Paul Gauthier (aider): 122
tests/basic/test_commands.py:
Paul Gauthier: 2
Paul Gauthier (aider): 48
tests/basic/test_exceptions.py:
Paul Gauthier (aider): 17
tests/basic/test_io.py:
Paul Gauthier (aider): 28
tests/basic/test_main.py:
Paul Gauthier: 15
Paul Gauthier (aider): 199
tests/basic/test_onboarding.py:
Paul Gauthier (aider): 439
tests/basic/test_repomap.py:
Vasil Markoukin: 3
tests/basic/test_ssl_verification.py:
Paul Gauthier (aider): 8
tests/basic/test_watch.py:
Matteo Landi (aider): 9
tests/fixtures/languages/scala/test.scala:
Vasil Markoukin: 61
grand_total:
Andrey Ivanov: 2
Matteo Landi (aider): 22
Mir Adnan ALI: 2
Paul Gauthier: 145
Paul Gauthier (aider): 1790
Peter Schilling (aider) (aider): 25
Vasil Markoukin: 129
start_tag: v0.79.0
total_lines: 2115
- aider_percentage: 85.55
aider_total: 225
end_date: '2025-04-04'
end_tag: v0.81.0
file_counts:
.github/workflows/check_pypi_version.yml:
Paul Gauthier: 11
Paul Gauthier (aider): 75
.github/workflows/windows_check_pypi_version.yml:
Paul Gauthier: 4
Paul Gauthier (aider): 86
aider/__init__.py:
Paul Gauthier: 1
aider/coders/base_coder.py:
Paul Gauthier (aider): 4
aider/exceptions.py:
Paul Gauthier: 6
Paul Gauthier (aider): 12
aider/main.py:
Paul Gauthier (aider): 40
aider/models.py:
Paul Gauthier (aider): 2
aider/resources/model-settings.yml:
Paul Gauthier: 9
Paul Gauthier (aider): 1
aider/website/_includes/leaderboard.js:
Paul Gauthier (aider): 5
aider/website/docs/leaderboards/index.md:
Paul Gauthier: 1
aider/website/index.html:
Paul Gauthier: 3
tests/basic/test_exceptions.py:
Paul Gauthier: 3
grand_total:
Paul Gauthier: 38
Paul Gauthier (aider): 225
start_tag: v0.80.0
total_lines: 263

View File

@@ -807,28 +807,210 @@
seconds_per_case: 290.0
total_cost: 1.1164
- dirname: 2025-03-25-19-46-45--gemini-25-pro-exp-diff-fenced
- dirname: 2025-04-12-04-55-50--gemini-25-pro-diff-fenced
test_cases: 225
model: Gemini 2.5 Pro exp-03-25
model: Gemini 2.5 Pro Preview 03-25
edit_format: diff-fenced
commit_hash: 33413ec
pass_rate_1: 39.1
commit_hash: 0282574
pass_rate_1: 40.9
pass_rate_2: 72.9
pass_num_1: 88
pass_num_1: 92
pass_num_2: 164
percent_cases_well_formed: 89.8
error_outputs: 30
num_malformed_responses: 30
num_with_malformed_responses: 23
user_asks: 57
percent_cases_well_formed: 92.4
error_outputs: 21
num_malformed_responses: 21
num_with_malformed_responses: 17
user_asks: 69
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
total_tests: 225
command: aider --model gemini/gemini-2.5-pro-preview-03-25
date: 2025-04-12
versions: 0.81.3.dev
seconds_per_case: 45.3
total_cost: 6.3174
- dirname: 2025-03-29-05-24-55--chatgpt4o-mar28-diff
test_cases: 225
model: chatgpt-4o-latest (2025-03-29)
edit_format: diff
commit_hash: 0decbad
pass_rate_1: 16.4
pass_rate_2: 45.3
pass_num_1: 37
pass_num_2: 102
percent_cases_well_formed: 64.4
error_outputs: 85
num_malformed_responses: 85
num_with_malformed_responses: 80
user_asks: 174
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 4
total_tests: 225
command: aider --model chatgpt-4o-latest
date: 2025-03-29
versions: 0.79.3.dev
seconds_per_case: 10.3
total_cost: 19.7416
- dirname: 2025-04-04-02-57-25--qalpha-diff-exsys
test_cases: 225
model: Quasar Alpha
edit_format: diff
commit_hash: 8a34a6c-dirty
pass_rate_1: 21.8
pass_rate_2: 54.7
pass_num_1: 49
pass_num_2: 123
percent_cases_well_formed: 98.2
error_outputs: 4
num_malformed_responses: 4
num_with_malformed_responses: 4
user_asks: 187
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 4
total_tests: 225
command: aider --model openrouter/openrouter/quasar-alpha
date: 2025-04-04
versions: 0.80.5.dev
seconds_per_case: 14.8
total_cost: 0.0000
- dirname: 2025-04-06-08-39-52--llama-4-maverick-17b-128e-instruct-polyglot-whole
test_cases: 225
model: Llama 4 Maverick
edit_format: whole
commit_hash: 9445a31
pass_rate_1: 4.4
pass_rate_2: 15.6
pass_num_1: 10
pass_num_2: 35
percent_cases_well_formed: 99.1
error_outputs: 12
num_malformed_responses: 2
num_with_malformed_responses: 2
user_asks: 248
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 4
total_tests: 225
command: aider --model nvidia_nim/meta/llama-4-maverick-17b-128e-instruct
date: 2025-04-06
versions: 0.81.2.dev
seconds_per_case: 20.5
total_cost: 0.0000
- dirname: 2025-04-10-04-21-31--grok3-diff-exuser
test_cases: 225
model: Grok 3 Beta
edit_format: diff
commit_hash: 2dd40fc-dirty
pass_rate_1: 22.2
pass_rate_2: 53.3
pass_num_1: 50
pass_num_2: 120
percent_cases_well_formed: 99.6
error_outputs: 1
num_malformed_responses: 1
num_with_malformed_responses: 1
user_asks: 68
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
total_tests: 225
command: aider --model openrouter/x-ai/grok-3-beta
date: 2025-04-10
versions: 0.81.2.dev
seconds_per_case: 15.3
total_cost: 11.0338
- dirname: 2025-04-10-18-47-24--grok3-mini-whole-exuser
test_cases: 225
model: Grok 3 Mini Beta (low)
edit_format: whole
commit_hash: 14ffe77-dirty
pass_rate_1: 11.1
pass_rate_2: 34.7
pass_num_1: 25
pass_num_2: 78
percent_cases_well_formed: 100.0
error_outputs: 3
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 73
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 5
total_tests: 225
command: aider --model openrouter/x-ai/grok-3-mini-beta
date: 2025-04-10
versions: 0.81.2.dev
seconds_per_case: 35.1
total_cost: 0.7856
- dirname: 2025-04-10-23-59-02--xai-grok3-mini-whole-high
test_cases: 225
model: Grok 3 Mini Beta (high)
edit_format: whole
commit_hash: 8ee33da-dirty
pass_rate_1: 17.3
pass_rate_2: 49.3
pass_num_1: 39
pass_num_2: 111
percent_cases_well_formed: 99.6
error_outputs: 1
num_malformed_responses: 1
num_with_malformed_responses: 1
user_asks: 64
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 0
total_tests: 225
command: aider --model xai/grok-3-mini-beta --reasoning-effort high
date: 2025-04-10
versions: 0.81.3.dev
seconds_per_case: 79.1
total_cost: 0.7346
- dirname: 2025-04-10-19-02-44--oalpha-diff-exsys
test_cases: 225
model: Optimus Alpha
edit_format: diff
commit_hash: 532bc45-dirty
pass_rate_1: 21.3
pass_rate_2: 52.9
pass_num_1: 48
pass_num_2: 119
percent_cases_well_formed: 97.3
error_outputs: 7
num_malformed_responses: 6
num_with_malformed_responses: 6
user_asks: 182
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
total_tests: 225
command: aider --model gemini/gemini-2.5-pro-exp-03-25
date: 2025-03-25
versions: 0.78.1.dev
seconds_per_case: 47.1
command: aider --model openrouter/openrouter/optimus-alpha
date: 2025-04-10
versions: 0.81.2.dev
seconds_per_case: 18.4
total_cost: 0.0000

View File

@@ -4,7 +4,11 @@ document.addEventListener('DOMContentLoaded', function () {
const redDiagonalPattern = pattern.draw('diagonal', 'rgba(255, 99, 132, 0.2)');
let displayedData = [];
const HIGHLIGHT_MODEL = '{{ highlight_model | default: "no no no" }}';
// Get highlight model from query string or Jekyll variable
const urlParams = new URLSearchParams(window.location.search);
const queryHighlight = urlParams.get('highlight');
const HIGHLIGHT_MODEL = queryHighlight || '{{ highlight_model | default: "no no no" }}';
var leaderboardData = {
labels: [],
datasets: [{
@@ -13,14 +17,14 @@ document.addEventListener('DOMContentLoaded', function () {
backgroundColor: function(context) {
const row = allData[context.dataIndex];
if (row && row.edit_format === 'whole') {
return diagonalPattern;
return redDiagonalPattern; // Use red pattern for highlighted whole format
}
const label = leaderboardData.labels[context.dataIndex] || '';
return (label && label.includes(HIGHLIGHT_MODEL)) ? 'rgba(255, 99, 132, 0.2)' : 'rgba(54, 162, 235, 0.2)';
return (label && HIGHLIGHT_MODEL && label.toLowerCase().includes(HIGHLIGHT_MODEL.toLowerCase())) ? 'rgba(255, 99, 132, 0.2)' : 'rgba(54, 162, 235, 0.2)';
},
borderColor: function(context) {
const label = context.chart.data.labels[context.dataIndex] || '';
return (label && label.includes(HIGHLIGHT_MODEL)) ? 'rgba(255, 99, 132, 1)' : 'rgba(54, 162, 235, 1)';
return (label && HIGHLIGHT_MODEL && label.toLowerCase().includes(HIGHLIGHT_MODEL.toLowerCase())) ? 'rgba(255, 99, 132, 1)' : 'rgba(54, 162, 235, 1)';
},
borderWidth: 1
}, {
@@ -74,11 +78,13 @@ document.addEventListener('DOMContentLoaded', function () {
leaderboardChart.render();
}
// Use displayedData in the backgroundColor callback instead of allData
// Update backgroundColor and borderColor for the main dataset based on displayedData
leaderboardData.datasets[0].backgroundColor = function(context) {
const row = displayedData[context.dataIndex];
const label = leaderboardData.labels[context.dataIndex] || '';
if (label && label.includes(HIGHLIGHT_MODEL)) {
const isHighlighted = label && HIGHLIGHT_MODEL && label.toLowerCase().includes(HIGHLIGHT_MODEL.toLowerCase());
if (isHighlighted) {
if (row && row.edit_format === 'whole') return redDiagonalPattern;
else return 'rgba(255, 99, 132, 0.2)';
} else if (row && row.edit_format === 'whole') {
@@ -171,6 +177,9 @@ document.addEventListener('DOMContentLoaded', function () {
},
x: {
ticks: {
autoSkip: false, // Prevent labels from being automatically skipped
maxRotation: 90, // Allow labels to rotate up to 90 degrees
minRotation: 0,
callback: function(value, index) {
const label = this.getLabelForValue(value);
if (label.length <= "claude-3-5-sonnet".length) {

Binary file not shown.

After

Width:  |  Height:  |  Size: 260 KiB

File diff suppressed because it is too large Load Diff

View File

@@ -171,19 +171,19 @@
#stream: true
## Set the color for user input (default: #00cc00)
#user-input-color: #00cc00
#user-input-color: "#00cc00"
## Set the color for tool output (default: None)
#tool-output-color: "xxx"
## Set the color for tool error messages (default: #FF2222)
#tool-error-color: #FF2222
#tool-error-color: "#FF2222"
## Set the color for tool warning messages (default: #FFA500)
#tool-warning-color: #FFA500
#tool-warning-color: "#FFA500"
## Set the color for assistant output (default: #0088ff)
#assistant-output-color: #0088ff
#assistant-output-color: "#0088ff"
## Set the color for the completion menu (default: terminal's default text color)
#completion-menu-color: "xxx"

View File

@@ -569,6 +569,14 @@ cog.out("```\n")
extra_params:
max_tokens: 128000
- name: fireworks_ai/accounts/fireworks/models/deepseek-v3-0324
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 160000
- name: fireworks_ai/accounts/fireworks/models/qwq-32b
edit_format: diff
weak_model_name: fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct
@@ -612,6 +620,12 @@ cog.out("```\n")
- name: gemini/gemini-2.5-pro-exp-03-25
edit_format: diff-fenced
weak_model_name: gemini/gemini-2.0-flash
use_repo_map: true
- name: gemini/gemini-2.5-pro-preview-03-25
edit_format: diff-fenced
weak_model_name: gemini/gemini-2.0-flash
use_repo_map: true
- name: gemini/gemini-exp-1114
@@ -948,6 +962,25 @@ cog.out("```\n")
reminder: sys
examples_as_sys_msg: true
- name: openrouter/deepseek/deepseek-chat-v3-0324
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
- name: openrouter/deepseek/deepseek-chat-v3-0324:free
edit_format: diff
weak_model_name: openrouter/deepseek/deepseek-chat-v3-0324:free
use_repo_map: true
examples_as_sys_msg: true
caches_by_default: true
use_temperature: false
editor_model_name: openrouter/deepseek/deepseek-r1:free
editor_edit_format: editor-diff
- name: openrouter/deepseek/deepseek-chat:free
edit_format: diff
weak_model_name: openrouter/deepseek/deepseek-chat:free
@@ -998,12 +1031,10 @@ cog.out("```\n")
extra_params:
max_tokens: 8192
caches_by_default: true
use_temperature: false
editor_model_name: openrouter/deepseek/deepseek-r1:free
editor_edit_format: editor-diff
- name: openrouter/google/gemini-2.5-pro-exp-03-25:free
edit_format: diff-fenced
weak_model_name: openrouter/google/gemini-2.0-flash-exp:free
use_repo_map: true
- name: openrouter/google/gemma-3-27b-it
@@ -1079,6 +1110,16 @@ cog.out("```\n")
accepts_settings:
- reasoning_effort
- name: openrouter/openrouter/optimus-alpha
edit_format: diff
use_repo_map: true
examples_as_sys_msg: true
- name: openrouter/openrouter/quasar-alpha
edit_format: diff
use_repo_map: true
examples_as_sys_msg: true
- name: openrouter/qwen/qwen-2.5-coder-32b-instruct
edit_format: diff
weak_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
@@ -1086,6 +1127,15 @@ cog.out("```\n")
editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
editor_edit_format: editor-diff
- name: openrouter/x-ai/grok-3-beta
edit_format: diff
use_repo_map: true
- name: openrouter/x-ai/grok-3-mini-beta
use_repo_map: true
accepts_settings:
- reasoning_effort
- name: vertex_ai-anthropic_models/vertex_ai/claude-3-7-sonnet@20250219
edit_format: diff
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
@@ -1151,9 +1201,22 @@ cog.out("```\n")
edit_format: diff-fenced
use_repo_map: true
- name: vertex_ai/gemini-2.5-pro-preview-03-25
edit_format: diff-fenced
use_repo_map: true
- name: vertex_ai/gemini-pro-experimental
edit_format: diff-fenced
use_repo_map: true
- name: xai/grok-3-beta
edit_format: diff
use_repo_map: true
- name: xai/grok-3-mini-beta
use_repo_map: true
accepts_settings:
- reasoning_effort
```
<!--[[[end]]]-->

View File

@@ -225,19 +225,19 @@ cog.outl("```")
#stream: true
## Set the color for user input (default: #00cc00)
#user-input-color: #00cc00
#user-input-color: "#00cc00"
## Set the color for tool output (default: None)
#tool-output-color: "xxx"
## Set the color for tool error messages (default: #FF2222)
#tool-error-color: #FF2222
#tool-error-color: "#FF2222"
## Set the color for tool warning messages (default: #FFA500)
#tool-warning-color: #FFA500
#tool-warning-color: "#FFA500"
## Set the color for assistant output (default: #0088ff)
#assistant-output-color: #0088ff
#assistant-output-color: "#0088ff"
## Set the color for the completion menu (default: terminal's default text color)
#completion-menu-color: "xxx"

View File

@@ -80,9 +80,14 @@ for alias, model in sorted(MODEL_ALIASES.items()):
- `4o`: gpt-4o
- `deepseek`: deepseek/deepseek-chat
- `flash`: gemini/gemini-2.0-flash-exp
- `gemini`: gemini/gemini-2.5-pro-preview-03-25
- `gemini-2.5-pro`: gemini/gemini-2.5-pro-exp-03-25
- `gemini-exp`: gemini/gemini-2.5-pro-exp-03-25
- `grok3`: xai/grok-3-beta
- `haiku`: claude-3-5-haiku-20241022
- `optimus`: openrouter/openrouter/optimus-alpha
- `opus`: claude-3-opus-20240229
- `quasar`: openrouter/openrouter/quasar-alpha
- `r1`: deepseek/deepseek-reasoner
- `sonnet`: anthropic/claude-3-7-sonnet-20250219
<!--[[[end]]]-->

View File

@@ -264,17 +264,13 @@ tr:hover { background-color: #f5f5f5; }
</style>
<table>
<tr><th>Model Name</th><th class='right'>Total Tokens</th><th class='right'>Percent</th></tr>
<tr><td>fireworks_ai/accounts/fireworks/models/deepseek-v3</td><td class='right'>1,564,808</td><td class='right'>36.4%</td></tr>
<tr><td>anthropic/claude-3-7-sonnet-20250219</td><td class='right'>1,499,523</td><td class='right'>34.9%</td></tr>
<tr><td>fireworks_ai/accounts/fireworks/models/deepseek-r1</td><td class='right'>380,307</td><td class='right'>8.8%</td></tr>
<tr><td>deepseek/deepseek-chat</td><td class='right'>312,589</td><td class='right'>7.3%</td></tr>
<tr><td>gpt-4o</td><td class='right'>243,123</td><td class='right'>5.7%</td></tr>
<tr><td>gemini/gemini-2.5-pro-exp-03-25</td><td class='right'>150,031</td><td class='right'>3.5%</td></tr>
<tr><td>claude-3-5-haiku-20241022</td><td class='right'>81,038</td><td class='right'>1.9%</td></tr>
<tr><td>o3-mini</td><td class='right'>48,351</td><td class='right'>1.1%</td></tr>
<tr><td>openrouter/google/gemini-2.5-pro-exp-03-25:free</td><td class='right'>11,449</td><td class='right'>0.3%</td></tr>
<tr><td>gemini/REDACTED</td><td class='right'>5,772</td><td class='right'>0.1%</td></tr>
<tr><td>openrouter/REDACTED</td><td class='right'>3,830</td><td class='right'>0.1%</td></tr>
<tr><td>gemini/gemini-2.5-pro-exp-03-25</td><td class='right'>1,119,621</td><td class='right'>77.4%</td></tr>
<tr><td>gemini/gemini-2.5-pro-preview-03-25</td><td class='right'>269,898</td><td class='right'>18.6%</td></tr>
<tr><td>openrouter/anthropic/claude-3.7-sonnet</td><td class='right'>18,140</td><td class='right'>1.3%</td></tr>
<tr><td>o3-mini</td><td class='right'>17,296</td><td class='right'>1.2%</td></tr>
<tr><td>openrouter/x-ai/grok-3-mini-beta</td><td class='right'>16,987</td><td class='right'>1.2%</td></tr>
<tr><td>openrouter/REDACTED</td><td class='right'>4,099</td><td class='right'>0.3%</td></tr>
<tr><td>xai/grok-3-mini-beta</td><td class='right'>1,224</td><td class='right'>0.1%</td></tr>
</table>
{: .note :}

View File

@@ -36,17 +36,16 @@ If you can find and share that file in a
[GitHub issue](https://github.com/Aider-AI/aider/issues),
then it may be possible to add repo map support.
If aider doesn't support linting, it will be complicated to
add linting and repo map support.
That is because aider relies on
[py-tree-sitter-languages](https://github.com/grantjenks/py-tree-sitter-languages)
If aider doesn't already support linting your language,
it will be more complicated to add support.
Aider relies on
[tree-sitter-language-pack](https://github.com/Goldziher/tree-sitter-language-pack)
to provide pre-packaged versions of tree-sitter
parsers for many languages.
Aider needs to be easy for users to install in many environments,
and it is probably too complex to add dependencies on
additional individual tree-sitter parsers.
language parsers.
This makes it easy for users to install aider in many diverse environments.
You probably need to work with that project to get your language
supported; once it is, aider will be able to lint that language.
For repo-map support, you will also need to find or create a `tags.scm` file.
<!--[[[cog
from aider.repomap import get_supported_languages_md
@@ -215,8 +214,8 @@ cog.out(get_supported_languages_md())
| rst | .rst | | ✓ |
| ruby | .rb | ✓ | ✓ |
| rust | .rs | ✓ | ✓ |
| scala | .sc | | ✓ |
| scala | .scala | | ✓ |
| scala | .sc | | ✓ |
| scala | .scala | | ✓ |
| scheme | .scm | | ✓ |
| scheme | .ss | | ✓ |
| scss | .scss | | ✓ |

View File

@@ -128,6 +128,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
January 16, 2025.
April 12, 2025.
<!--[[[end]]]-->
</p>

View File

@@ -124,6 +124,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
March 25, 2025.
April 12, 2025.
<!--[[[end]]]-->
</p>

View File

@@ -73,6 +73,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
January 16, 2025.
April 12, 2025.
<!--[[[end]]]-->
</p>

View File

@@ -98,7 +98,7 @@ if result.returncode == 0:
date = datetime.datetime.fromtimestamp(timestamp)
cog.out(f"{date.strftime('%B %d, %Y.')}")
]]]-->
December 06, 2024.
April 12, 2025.
<!--[[[end]]]-->
</p>

View File

@@ -12,16 +12,16 @@ python -m pip install -U aider-chat
# Mac/Linux:
export AZURE_API_KEY=<key>
export AZURE_API_VERSION=2023-05-15
export AZURE_API_VERSION=2024-12-01-preview
export AZURE_API_BASE=https://myendpt.openai.azure.com
# Windows
setx AZURE_API_KEY <key>
setx AZURE_API_VERSION 2023-05-15
setx AZURE_API_VERSION 2024-12-01-preview
setx AZURE_API_BASE https://myendpt.openai.azure.com
# ... restart your shell after setx commands
aider --model azure/<your_deployment_name>
aider --model azure/<your_model_deployment_name>
# List models available from Azure
aider --list-models azure/
@@ -29,3 +29,9 @@ aider --list-models azure/
Note that aider will also use environment variables
like `AZURE_OPENAI_API_xxx`.
The `aider --list-models azure/` command will list all models that aider supports through Azure, not the models that are available for the provided endpoint.
When setting the model to use with `--model azure/<your_model_deployment_name>`, `<your_model_deployment_name>` is likely just the name of the model you have deployed to the endpoint, for example `o3-mini` or `gpt-4o`. The screenshot below shows `o3-mini` and `gpt-4o` deployments in the Azure portal under the `myendpt` resource.
![example azure deployment](/assets/azure-deployment.png)

View File

@@ -71,6 +71,7 @@ cog.out(model_list)
- claude-3-sonnet-20240229
- codestral/codestral-2405
- codestral/codestral-latest
- databricks/databricks-claude-3-7-sonnet
- deepseek/deepseek-chat
- deepseek/deepseek-coder
- deepseek/deepseek-reasoner

View File

@@ -87,6 +87,8 @@ Aider optimizes the repo map by
selecting the most important parts of the codebase
which will
fit into the active token budget.
The optimization identifies and maps the portions of the code base
which are most relevant to the current state of the chat.
The token budget is
influenced by the `--map-tokens` switch, which defaults to 1k tokens.

View File

@@ -5,7 +5,27 @@ nav_order: 28
# Models and API keys
You need to tell aider which LLM to use and provide an API key.
Aider needs to know which LLM model you would like to work with and which keys
to provide when accessing it via API.
## Defaults
If you don't explicitly name a model, aider will try to select a model
for you to work with.
First, aider will check which
[keys you have provided via the environment, config files, or command line arguments](https://aider.chat/docs/config/api-keys.html).
Based on the available keys, aider will select the best model to use.
If you have not provided any keys, aider will offer to help you connect to
[OpenRouter](https://openrouter.ai)
which provides both free and paid access to most popular LLMs.
Once connected, aider will select the best model available on OpenRouter
based on whether you have a free or paid account there.
## Specifying model & key
You can also tell aider which LLM to use and provide an API key.
The easiest way is to use the `--model` and `--api-key`
command line arguments, like this:

View File

@@ -99,6 +99,7 @@ The interactive prompt is built with [prompt-toolkit](https://github.com/prompt-
- `Ctrl-N` : Move down to the next history entry.
- `Ctrl-P` : Move up to the previous history entry.
- `Ctrl-R` : Reverse search in command history.
- `Ctrl-X Ctrl-E` : Open the current input in an external editor
- `Ctrl-Y` : Paste (yank) text that was previously cut.

View File

@@ -9,9 +9,9 @@ description: Using the code, architect, ask and help chat modes.
Aider has a few different chat modes:
- `code` - Aider will make changes to your code to satisfy your requests.
- `architect` - Aider will first propose a solution, then ask if you want it to turn that proposal into edits to your files.
- `ask` - Aider will answer questions about your code, but never edit it.
- `help` - Aider will answer questions about using aider, configuring, troubleshooting, etc.
- `ask` - Aider will discuss your code and answer questions about it, but never make changes.
- `architect` - Like code mode, aider will change your files. An architect model will propose changes and an editor model will translate that proposal into specific file edits.
- `help` - Aider will answer questions about aider: usage, configuration, troubleshooting, etc.
By default, aider starts in "code" mode. As you are talking, you can
send individual messages in a specific mode using
@@ -40,31 +40,104 @@ Or you can switch between coding modes using these commands without arguments:
Or you can launch aider in one of the modes with the `--chat-mode <mode>` switch.
There is also a special shortcut `--architect` to launch in `--chat-mode architect`.
The aider prompt will indicate the active mode:
```
> This is code mode.
ask> This is ask mode.
architect> This is architect mode.
```
## Ask/code workflow
A recommended workflow is to bounce back and forth between `/ask` and `/code` modes.
Use ask mode to discuss what you want to do, get suggestions or options from aider
and provide feedback on the approach.
Once aider understands the mission, switch to code mode to have it start editing
your files.
All the conversation and decision making from ask mode will
help ensure that the correct code changes are performed.
You can be very terse when you finally switch from ask to code mode.
Saying something as simple as "go ahead" in code mode will
have aider execute on the plan you've been discussing.
Here's an example with two ask mode messages to agree on the plan,
followed by two terse code mode messages to edit the code.
````
─────────────────────────────────────────────────────────────────────────────────────
Aider v0.79.0
Model: gemini/gemini-2.5-pro-exp-03-25 with diff-fenced edit format
> /ask What's the best thing to print if we're making a quick little demo program?
It sounds like you'd like to write a "Hello, world!" program. That's a very
popular phrase to use for small demo programs.
> /ask Yes, that's the idea. What are some more creative options?
1. "Greetings, Earthlings!"
2. "Beep boop, I am a computer!"
3. "To code, or not to code, that is the question."
4. "I think, therefore I print."
5. "01001000 01101001!" (Binary for "Hi!")
> do 4
hello.py
>>>>>>> SEARCH
=======
def main():
print("I think, therefore I print.")
<<<<<<< REPLACE
> ALL CAPS!
hello.py
>>>>>>> SEARCH
print("I think, therefore I print.")
=======
print("I THINK, THEREFORE I PRINT!")
<<<<<<< REPLACE
````
You can think of this ask/code workflow as a more fluid version of
architect mode, but working just with one model the whole time.
## Architect mode and the editor model
When you are in architect mode, aider sends your request to two models:
When you are in architect mode, aider sends your requests to two models:
1. First, it sends your request to the main active model.
The main model is configured with `/model`, `--model` or the shortcut switches like `--sonnet`.
After the main model replies, aider will offer to edit the files based on the response.
1. First, it sends your request to the main model which will act as an architect
to propose how to solve your coding request.
The main model is configured with `/model` or `--model`.
2. To edit the files, aider sends a second LLM request asking for specific code editing instructions.
This request goes to the "editor" model.
2. Aider then sends another request to an "editor model",
asking it to turn the architect's proposal into specific file editing instructions.
Aider has built in defaults to select an editor model based on your main model.
Or, you can choose an editor model yourself with `--editor-model <model>`.
Or, you can choose a specific editor model with `--editor-model <model>`.
Architect mode produces better results than code mode, but uses two LLM requests.
This probably makes it slower and more expensive than using code mode.
Certain LLMs aren't able to propose coding solutions *and*
specify detailed file edits all in one go.
For these models, architect mode can produce better results than code mode
by pairing them
with an editor model that is responsible for generating the file editing instructions.
But this uses two LLM requests,
which can take longer and increase costs.
Architect mode is especially useful with OpenAI's o1 models, which are strong at
reasoning but less capable at editing files.
Pairing an o1 architect with an editor model like GPT-4o or Sonnet will
give the best results.
But architect mode is also quite helpful when you use GPT-4o or Sonnet
at both the architect and the editor.
But architect mode can also be helpful when you use the same model
as both the architect and the editor.
Allowing the model two requests to solve the problem and edit the files
usually provides a better result.
can sometimes provide better results.
The editor model uses one of aider's edit formats to let the LLM
edit source files.
@@ -91,9 +164,9 @@ for more details.
#### /ask What is this repo?
This is the source code to the popular django package.
This is collection of python functions that compute various math functions.
#### /help How do I use ollama?
#### /help How do I use aider with ollama?
Run `aider --model ollama/<ollama-model>`.
See these docs for more info: https://aider.chat/docs/llms/ollama.html
@@ -122,8 +195,6 @@ builtin.
This way you don't have to maintain a custom factorial implementation,
and the builtin function is well optimized.
> Edit the files? (Y)es/(N)o [Yes]: Yes
```python
<<<<<<< SEARCH
def factorial(n):

View File

@@ -63,29 +63,29 @@ layout: none
<div class="stats-container">
<!--[[[cog
from scripts.badges import get_badges_html
from scripts.homepage import get_badges_html
text = get_badges_html()
cog.out(text)
]]]-->
<a href="https://github.com/Aider-AI/aider" class="github-badge badge-stars" title="Total number of GitHub stars the Aider project has received">
<span class="badge-label">⭐ GitHub Stars</span>
<span class="badge-value">30K</span>
<span class="badge-value">31K</span>
</a>
<a href="https://pypi.org/project/aider-chat/" class="github-badge badge-installs" title="Total number of installations via pip from PyPI">
<span class="badge-label">📦 Installs</span>
<span class="badge-value">1.7M</span>
<span class="badge-value">1.9M</span>
</a>
<div class="github-badge badge-tokens" title="Number of tokens processed weekly by Aider users">
<span class="badge-label">📈 Tokens/week</span>
<span class="badge-value">15B</span>
</div>
<a href="https://openrouter.ai/" class="github-badge badge-router" title="Aider's ranking among applications on the OpenRouter platform">
<a href="https://openrouter.ai/#options-menu" class="github-badge badge-router" title="Aider's ranking among applications on the OpenRouter platform">
<span class="badge-label">🏆 OpenRouter</span>
<span class="badge-value">Top 20</span>
</a>
<a href="/HISTORY.html" class="github-badge badge-coded" title="Percentage of the new code in Aider's last release written by Aider itself">
<span class="badge-label">🔄 Singularity</span>
<span class="badge-value">65%</span>
<span class="badge-value">86%</span>
</a>
<!--[[[end]]]-->
</div>
@@ -237,110 +237,186 @@ aider --model o3-mini --api-key openai=&lt;key&gt;</code></pre>
</div>
</section>
<script>
// All testimonials from the README
const testimonials = [
{
text: "The best free open source AI coding assistant.",
author: "IndyDevDan",
link: "https://youtu.be/YALpX8oOn78"
},
{
text: "The best AI coding assistant so far.",
author: "Matthew Berman",
link: "https://www.youtube.com/watch?v=df8afeb1FY8"
},
{
text: "Aider ... has easily quadrupled my coding productivity.",
author: "SOLAR_FIELDS",
link: "https://news.ycombinator.com/item?id=36212100"
},
{
text: "It's a cool workflow... Aider's ergonomics are perfect for me.",
author: "qup",
link: "https://news.ycombinator.com/item?id=38185326"
},
{
text: "It's really like having your senior developer live right in your Git repo - truly amazing!",
author: "rappster",
link: "https://github.com/Aider-AI/aider/issues/124"
},
{
text: "What an amazing tool. It's incredible.",
author: "valyagolev",
link: "https://github.com/Aider-AI/aider/issues/6#issue-1722897858"
},
{
text: "Aider is such an astounding thing!",
author: "cgrothaus",
link: "https://github.com/Aider-AI/aider/issues/82#issuecomment-1631876700"
},
{
text: "It was WAY faster than I would be getting off the ground and making the first few working versions.",
author: "Daniel Feldman",
link: "https://twitter.com/d_feldman/status/1662295077387923456"
},
{
text: "THANK YOU for Aider! It really feels like a glimpse into the future of coding.",
author: "derwiki",
link: "https://news.ycombinator.com/item?id=38205643"
},
{
text: "It's just amazing. It is freeing me to do things I felt were out my comfort zone before.",
author: "Dougie",
link: "https://discord.com/channels/1131200896827654144/1174002618058678323/1174084556257775656"
},
{
text: "This project is stellar.",
author: "funkytaco",
link: "https://github.com/Aider-AI/aider/issues/112#issuecomment-1637429008"
},
{
text: "Amazing project, definitely the best AI coding assistant I've used.",
author: "joshuavial",
link: "https://github.com/Aider-AI/aider/issues/84"
},
{
text: "I absolutely love using Aider ... It makes software development feel so much lighter as an experience.",
author: "principalideal0",
link: "https://discord.com/channels/1131200896827654144/1133421607499595858/1229689636012691468"
},
{
text: "I have been recovering from multiple shoulder surgeries ... and have used aider extensively. It has allowed me to continue productivity.",
author: "codeninja",
link: "https://www.reddit.com/r/OpenAI/s/nmNwkHy1zG"
},
{
text: "I am an aider addict. I'm getting so much more work done, but in less time.",
author: "dandandan",
link: "https://discord.com/channels/1131200896827654144/1131200896827654149/1135913253483069470"
},
{
text: "After wasting $100 on tokens trying to find something better, I'm back to Aider. It blows everything else out of the water hands down, there's no competition whatsoever.",
author: "SystemSculpt",
link: "https://discord.com/channels/1131200896827654144/1131200896827654149/1178736602797846548"
},
{
text: "Aider is amazing, coupled with Sonnet 3.5 it's quite mind blowing.",
author: "Josh Dingus",
link: "https://discord.com/channels/1131200896827654144/1133060684540813372/1262374225298198548"
},
{
text: "Hands down, this is the best AI coding assistant tool so far.",
author: "IndyDevDan",
link: "https://www.youtube.com/watch?v=MPYFPvxfGZs"
},
{
text: "[Aider] changed my daily coding workflows. It's mind-blowing how a single Python application can change your life.",
author: "maledorak",
link: "https://discord.com/channels/1131200896827654144/1131200896827654149/1258453375620747264"
},
{
text: "Best agent for actual dev work in existing codebases.",
author: "Nick Dobos",
link: "https://twitter.com/NickADobos/status/1690408967963652097?s=20"
}
];
<style>
.testimonial-card {
opacity: 1;
transform: translateY(0);
transition: opacity 0.4s ease, transform 0.4s ease, height 0.4s ease;
display: flex;
flex-direction: column;
justify-content: flex-start;
overflow: hidden;
padding: 20px;
box-sizing: border-box;
}
.testimonial-text {
line-height: 1.5;
margin-bottom: 15px;
}
.testimonial-author {
margin-top: auto;
padding-top: 10px;
}
</style>
<!--[[[cog
from scripts.homepage import get_testimonials_js
text = get_testimonials_js()
cog.out(text)
]]]-->
<script>
const testimonials = [
{
text: "The best free open source AI coding assistant.",
author: "IndyDevDan",
link: "https://youtu.be/YALpX8oOn78"
},
{
text: "The best AI coding assistant so far.",
author: "Matthew Berman",
link: "https://www.youtube.com/watch?v=df8afeb1FY8"
},
{
text: "Aider ... has easily quadrupled my coding productivity.",
author: "SOLAR_FIELDS",
link: "https://news.ycombinator.com/item?id=36212100"
},
{
text: "It's a cool workflow... Aider's ergonomics are perfect for me.",
author: "qup",
link: "https://news.ycombinator.com/item?id=38185326"
},
{
text: "It's really like having your senior developer live right in your Git repo - truly amazing!",
author: "rappster",
link: "https://github.com/Aider-AI/aider/issues/124"
},
{
text: "What an amazing tool. It's incredible.",
author: "valyagolev",
link: "https://github.com/Aider-AI/aider/issues/6#issue-1722897858"
},
{
text: "Aider is such an astounding thing!",
author: "cgrothaus",
link: "https://github.com/Aider-AI/aider/issues/82#issuecomment-1631876700"
},
{
text: "It was WAY faster than I would be getting off the ground and making the first few working versions.",
author: "Daniel Feldman",
link: "https://twitter.com/d_feldman/status/1662295077387923456"
},
{
text: "THANK YOU for Aider! It really feels like a glimpse into the future of coding.",
author: "derwiki",
link: "https://news.ycombinator.com/item?id=38205643"
},
{
text: "It's just amazing. It is freeing me to do things I felt were out my comfort zone before.",
author: "Dougie",
link: "https://discord.com/channels/1131200896827654144/1174002618058678323/1174084556257775656"
},
{
text: "This project is stellar.",
author: "funkytaco",
link: "https://github.com/Aider-AI/aider/issues/112#issuecomment-1637429008"
},
{
text: "Amazing project, definitely the best AI coding assistant I've used.",
author: "joshuavial",
link: "https://github.com/Aider-AI/aider/issues/84"
},
{
text: "I absolutely love using Aider ... It makes software development feel so much lighter as an experience.",
author: "principalideal0",
link: "https://discord.com/channels/1131200896827654144/1133421607499595858/1229689636012691468"
},
{
text: "I have been recovering from multiple shoulder surgeries ... and have used aider extensively. It has allowed me to continue productivity.",
author: "codeninja",
link: "https://www.reddit.com/r/OpenAI/s/nmNwkHy1zG"
},
{
text: "I am an aider addict. I'm getting so much more work done, but in less time.",
author: "dandandan",
link: "https://discord.com/channels/1131200896827654144/1131200896827654149/1135913253483069470"
},
{
text: "After wasting $100 on tokens trying to find something better, I'm back to Aider. It blows everything else out of the water hands down, there's no competition whatsoever.",
author: "SystemSculpt",
link: "https://discord.com/channels/1131200896827654144/1131200896827654149/1178736602797846548"
},
{
text: "Aider is amazing, coupled with Sonnet 3.5 it's quite mind blowing.",
author: "Josh Dingus",
link: "https://discord.com/channels/1131200896827654144/1133060684540813372/1262374225298198548"
},
{
text: "Hands down, this is the best AI coding assistant tool so far.",
author: "IndyDevDan",
link: "https://www.youtube.com/watch?v=MPYFPvxfGZs"
},
{
text: "[Aider] changed my daily coding workflows. It's mind-blowing how a single Python application can change your life.",
author: "maledorak",
link: "https://discord.com/channels/1131200896827654144/1131200896827654149/1258453375620747264"
},
{
text: "Best agent for actual dev work in existing codebases.",
author: "Nick Dobos",
link: "https://twitter.com/NickADobos/status/1690408967963652097?s=20"
},
{
text: "One of my favorite pieces of software. Blazing trails on new paradigms!",
author: "Chris Wall",
link: "https://x.com/chris65536/status/1905053299251798432"
},
{
text: "Aider has been revolutionary for me and my work.",
author: "Starry Hope",
link: "https://x.com/starryhopeblog/status/1904985812137132056"
},
{
text: "Try aider! One of the best ways to vibe code.",
author: "Chris Wall",
link: "https://x.com/Chris65536/status/1905053418961391929"
},
{
text: "Aider is hands down the best. And it's free and opensource.",
author: "AriyaSavakaLurker",
link: "https://www.reddit.com/r/ChatGPTCoding/comments/1ik16y6/whats_your_take_on_aider/mbip39n/"
},
{
text: "Aider is also my best friend.",
author: "jzn21",
link: "https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27dcnb/"
},
{
text: "Try Aider, it's worth it.",
author: "jorgejhms",
link: "https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27cp99/"
},
{
text: "I like aider :)",
author: "Chenwei Cui",
link: "https://x.com/ccui42/status/1904965344999145698"
},
{
text: "Aider is the precision tool of LLM code gen. It is minimal, thoughtful and capable of surgical changes to your codebase all while keeping the developer in control.",
author: "Reilly Sweetland",
link: "https://x.com/rsweetland/status/1904963807237259586"
},
{
text: "Cannot believe aider vibe coded a 650 LOC feature across service and cli today in 1 shot.",
author: "autopoietist",
link: "https://discord.com/channels/1131200896827654144/1131200896827654149/1355675042259796101"
}
];
</script>
<!--[[[end]]]-->
<script>
// Function to shuffle array
function shuffleArray(array) {
@@ -378,6 +454,32 @@ aider --model o3-mini --api-key openai=&lt;key&gt;</code></pre>
}
container.innerHTML = html;
// Set a fixed height for all testimonial cards after they've been created
setTimeout(setMaxTestimonialHeight, 50);
}
// Calculate and set the maximum height for all testimonial cards
function setMaxTestimonialHeight() {
const cards = document.querySelectorAll('.testimonial-card');
if (!cards.length) return;
// Find the maximum height among all cards
let maxHeight = 0;
cards.forEach(card => {
// Temporarily remove any set height to get the natural height
card.style.height = 'auto';
const height = card.offsetHeight;
maxHeight = Math.max(maxHeight, height);
});
// Add a bit of extra padding to ensure enough space
maxHeight += 20;
// Set the max height on all cards
cards.forEach(card => {
card.style.height = `${maxHeight}px`;
});
}
// Function to update a single testimonial
@@ -423,19 +525,30 @@ aider --model o3-mini --api-key openai=&lt;key&gt;</code></pre>
// Update the displayed testimonial
const testimonialElement = document.getElementById(`testimonial-${index}`);
if (testimonialElement) {
// Start the flip animation
testimonialElement.style.transform = "rotateY(90deg)";
// Start fade out with slight upward movement
testimonialElement.style.transition = "opacity 0.4s ease, transform 0.4s ease";
testimonialElement.style.opacity = "0";
testimonialElement.style.transform = "translateY(10px)";
// Update content when card is perpendicular to view (hidden)
// Update content when fully faded out
setTimeout(() => {
// Keep the current height during the content swap
const currentHeight = testimonialElement.style.height;
testimonialElement.innerHTML = `
<p class="testimonial-text">${newTestimonial.text}</p>
<p class="testimonial-author">— <a href="${newTestimonial.link}" target="_blank">${newTestimonial.author}</a></p>
`;
// Complete the flip
testimonialElement.style.transform = "rotateY(0deg)";
}, 300);
// Ensure the height remains consistent
testimonialElement.style.height = currentHeight;
// Start fade in
setTimeout(() => {
testimonialElement.style.opacity = "1";
testimonialElement.style.transform = "translateY(0)";
}, 50);
}, 400);
}
}
@@ -471,7 +584,7 @@ aider --model o3-mini --api-key openai=&lt;key&gt;</code></pre>
// Store this index as the last flipped
lastFlippedIndex = randomIndex;
}, 3000);
}, 1500);
});
</script>

View File

@@ -82,6 +82,7 @@ You can run `./benchmark/benchmark.py --help` for a list of all the arguments, b
- `--threads` specifies how many exercises to benchmark in parallel. Start with a single thread if you are working out the kinks on your benchmarking setup or working with a new model, etc. Once you are getting reliable results, you can speed up the process by running with more threads. 10 works well against the OpenAI APIs.
- `--num-tests` specifies how many of the tests to run before stopping. This is another way to start gently as you debug your benchmarking setup.
- `--keywords` filters the tests to run to only the ones whose name match the supplied argument (similar to `pytest -k xxxx`).
- `--read-model-settings=<filename.yml>` specify model settings, see here: https://aider.chat/docs/config/adv-model-settings.html#model-settings
### Benchmark report

View File

@@ -4,7 +4,7 @@ aiohappyeyeballs==2.6.1
# via
# -c requirements/common-constraints.txt
# aiohttp
aiohttp==3.11.14
aiohttp==3.11.16
# via
# -c requirements/common-constraints.txt
# litellm
@@ -77,7 +77,7 @@ filelock==3.18.0
# via
# -c requirements/common-constraints.txt
# huggingface-hub
flake8==7.1.2
flake8==7.2.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
@@ -86,7 +86,7 @@ frozenlist==1.5.0
# -c requirements/common-constraints.txt
# aiohttp
# aiosignal
fsspec==2025.3.0
fsspec==2025.3.2
# via
# -c requirements/common-constraints.txt
# huggingface-hub
@@ -106,7 +106,7 @@ h11==0.14.0
# via
# -c requirements/common-constraints.txt
# httpcore
httpcore==1.0.7
httpcore==1.0.8
# via
# -c requirements/common-constraints.txt
# httpx
@@ -115,7 +115,7 @@ httpx==0.28.1
# -c requirements/common-constraints.txt
# litellm
# openai
huggingface-hub==0.29.3
huggingface-hub==0.30.2
# via
# -c requirements/common-constraints.txt
# tokenizers
@@ -143,7 +143,7 @@ jiter==0.9.0
# via
# -c requirements/common-constraints.txt
# openai
json5==0.10.0
json5==0.12.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
@@ -156,7 +156,7 @@ jsonschema-specifications==2024.10.1
# via
# -c requirements/common-constraints.txt
# jsonschema
litellm==1.63.11
litellm==1.65.7
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
@@ -184,7 +184,7 @@ monotonic==1.6
# via
# -c requirements/common-constraints.txt
# posthog
multidict==6.2.0
multidict==6.4.3
# via
# -c requirements/common-constraints.txt
# aiohttp
@@ -198,7 +198,7 @@ numpy==1.26.4
# -c requirements/common-constraints.txt
# scipy
# soundfile
openai==1.66.3
openai==1.73.0
# via
# -c requirements/common-constraints.txt
# litellm
@@ -224,7 +224,7 @@ pip==25.0.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
posthog==3.21.0
posthog==3.24.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
@@ -232,7 +232,7 @@ prompt-toolkit==3.0.50
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
propcache==0.3.0
propcache==0.3.1
# via
# -c requirements/common-constraints.txt
# aiohttp
@@ -245,7 +245,7 @@ ptyprocess==0.7.0
# via
# -c requirements/common-constraints.txt
# pexpect
pycodestyle==2.12.1
pycodestyle==2.13.0
# via
# -c requirements/common-constraints.txt
# flake8
@@ -253,12 +253,12 @@ pycparser==2.22
# via
# -c requirements/common-constraints.txt
# cffi
pydantic==2.10.6
pydantic==2.11.3
# via
# -c requirements/common-constraints.txt
# litellm
# openai
pydantic-core==2.27.2
pydantic-core==2.33.1
# via
# -c requirements/common-constraints.txt
# pydantic
@@ -266,7 +266,7 @@ pydub==0.25.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
pyflakes==3.2.0
pyflakes==3.3.2
# via
# -c requirements/common-constraints.txt
# flake8
@@ -286,7 +286,7 @@ python-dateutil==2.9.0.post0
# via
# -c requirements/common-constraints.txt
# posthog
python-dotenv==1.0.1
python-dotenv==1.1.0
# via
# -c requirements/common-constraints.txt
# litellm
@@ -311,11 +311,11 @@ requests==2.32.3
# mixpanel
# posthog
# tiktoken
rich==13.9.4
rich==14.0.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
rpds-py==0.23.1
rpds-py==0.24.0
# via
# -c requirements/common-constraints.txt
# jsonschema
@@ -379,7 +379,7 @@ tree-sitter-embedded-template==0.23.2
# via
# -c requirements/common-constraints.txt
# tree-sitter-language-pack
tree-sitter-language-pack==0.6.1
tree-sitter-language-pack==0.7.1
# via
# -c requirements/common-constraints.txt
# grep-ast
@@ -387,7 +387,7 @@ tree-sitter-yaml==0.7.0
# via
# -c requirements/common-constraints.txt
# tree-sitter-language-pack
typing-extensions==4.12.2
typing-extensions==4.13.2
# via
# -c requirements/common-constraints.txt
# anyio
@@ -397,12 +397,17 @@ typing-extensions==4.12.2
# pydantic
# pydantic-core
# referencing
urllib3==2.3.0
# typing-inspection
typing-inspection==0.4.0
# via
# -c requirements/common-constraints.txt
# pydantic
urllib3==2.4.0
# via
# -c requirements/common-constraints.txt
# mixpanel
# requests
watchfiles==1.0.4
watchfiles==1.0.5
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements.in
@@ -410,7 +415,7 @@ wcwidth==0.2.13
# via
# -c requirements/common-constraints.txt
# prompt-toolkit
yarl==1.18.3
yarl==1.19.0
# via
# -c requirements/common-constraints.txt
# aiohttp

View File

@@ -2,7 +2,7 @@
# uv pip compile --no-strip-extras --output-file=requirements/common-constraints.txt requirements/requirements.in requirements/requirements-browser.in requirements/requirements-dev.in requirements/requirements-help.in requirements/requirements-playwright.in
aiohappyeyeballs==2.6.1
# via aiohttp
aiohttp==3.11.14
aiohttp==3.11.16
# via
# huggingface-hub
# litellm
@@ -27,6 +27,8 @@ backoff==2.2.1
# via
# -r requirements/requirements.in
# posthog
banks==2.1.1
# via llama-index-core
beautifulsoup4==4.13.3
# via -r requirements/requirements.in
blinker==1.9.0
@@ -34,7 +36,9 @@ blinker==1.9.0
build==1.2.2.post1
# via pip-tools
cachetools==5.5.2
# via streamlit
# via
# google-auth
# streamlit
certifi==2025.1.31
# via
# httpcore
@@ -59,6 +63,8 @@ codespell==2.4.1
# via -r requirements/requirements-dev.in
cogapp==3.4.1
# via -r requirements/requirements-dev.in
colorama==0.4.6
# via griffe
configargparse==1.7
# via -r requirements/requirements.in
contourpy==1.3.1
@@ -68,7 +74,9 @@ cycler==0.12.1
dataclasses-json==0.6.7
# via llama-index-core
deprecated==1.2.18
# via llama-index-core
# via
# banks
# llama-index-core
diff-match-patch==20241021
# via -r requirements/requirements.in
dill==0.3.9
@@ -93,15 +101,15 @@ filelock==3.18.0
# virtualenv
filetype==1.2.0
# via llama-index-core
flake8==7.1.2
flake8==7.2.0
# via -r requirements/requirements.in
fonttools==4.56.0
fonttools==4.57.0
# via matplotlib
frozenlist==1.5.0
# via
# aiohttp
# aiosignal
fsspec==2025.3.0
fsspec==2025.3.2
# via
# huggingface-hub
# llama-index-core
@@ -112,22 +120,51 @@ gitpython==3.1.44
# via
# -r requirements/requirements.in
# streamlit
google-api-core[grpc]==2.24.2
# via
# google-cloud-bigquery
# google-cloud-core
google-auth==2.38.0
# via
# google-api-core
# google-cloud-bigquery
# google-cloud-core
google-cloud-bigquery==3.31.0
# via -r requirements/requirements-dev.in
google-cloud-core==2.4.3
# via google-cloud-bigquery
google-crc32c==1.7.1
# via google-resumable-media
google-resumable-media==2.7.2
# via google-cloud-bigquery
googleapis-common-protos==1.69.2
# via
# google-api-core
# grpcio-status
greenlet==3.1.1
# via
# playwright
# sqlalchemy
grep-ast==0.8.1
# via -r requirements/requirements.in
griffe==1.7.2
# via banks
grpcio==1.71.0
# via
# google-api-core
# grpcio-status
grpcio-status==1.71.0
# via google-api-core
h11==0.14.0
# via httpcore
httpcore==1.0.7
httpcore==1.0.8
# via httpx
httpx==0.28.1
# via
# litellm
# llama-index-core
# openai
huggingface-hub[inference]==0.29.3
huggingface-hub[inference]==0.30.2
# via
# llama-index-embeddings-huggingface
# sentence-transformers
@@ -149,11 +186,12 @@ importlib-metadata==7.2.1
# litellm
importlib-resources==6.5.2
# via -r requirements/requirements.in
iniconfig==2.0.0
iniconfig==2.1.0
# via pytest
jinja2==3.1.6
# via
# altair
# banks
# litellm
# pydeck
# torch
@@ -163,7 +201,7 @@ joblib==1.4.2
# via
# nltk
# scikit-learn
json5==0.10.0
json5==0.12.0
# via -r requirements/requirements.in
jsonschema==4.23.0
# via
@@ -174,13 +212,13 @@ jsonschema-specifications==2024.10.1
# via jsonschema
kiwisolver==1.4.8
# via matplotlib
litellm==1.63.11
litellm==1.65.7
# via -r requirements/requirements.in
llama-index-core==0.12.24.post1
llama-index-core==0.12.26
# via
# -r requirements/requirements-help.in
# llama-index-embeddings-huggingface
llama-index-embeddings-huggingface==0.5.2
llama-index-embeddings-huggingface==0.5.3
# via -r requirements/requirements-help.in
lox==0.13.0
# via -r requirements/requirements-dev.in
@@ -202,7 +240,7 @@ monotonic==1.6
# via posthog
mpmath==1.3.0
# via sympy
multidict==6.2.0
multidict==6.4.3
# via
# aiohttp
# yarl
@@ -210,7 +248,7 @@ multiprocess==0.70.17
# via pathos
mypy-extensions==1.0.0
# via typing-inspect
narwhals==1.31.0
narwhals==1.34.1
# via altair
nest-asyncio==1.6.0
# via llama-index-core
@@ -236,13 +274,14 @@ numpy==1.26.4
# soundfile
# streamlit
# transformers
openai==1.66.3
openai==1.73.0
# via litellm
packaging==24.2
# via
# -r requirements/requirements.in
# altair
# build
# google-cloud-bigquery
# huggingface-hub
# marshmallow
# matplotlib
@@ -274,44 +313,60 @@ pip==25.0.1
# pip-tools
pip-tools==7.4.1
# via -r requirements/requirements-dev.in
platformdirs==4.3.6
# via virtualenv
playwright==1.50.0
platformdirs==4.3.7
# via
# banks
# virtualenv
playwright==1.51.0
# via -r requirements/requirements-playwright.in
pluggy==1.5.0
# via pytest
posthog==3.21.0
posthog==3.24.1
# via -r requirements/requirements.in
pox==0.3.5
# via pathos
ppft==1.7.6.9
# via pathos
pre-commit==4.1.0
pre-commit==4.2.0
# via -r requirements/requirements-dev.in
prompt-toolkit==3.0.50
# via -r requirements/requirements.in
propcache==0.3.0
propcache==0.3.1
# via
# aiohttp
# yarl
protobuf==5.29.3
# via streamlit
proto-plus==1.26.1
# via google-api-core
protobuf==5.29.4
# via
# google-api-core
# googleapis-common-protos
# grpcio-status
# proto-plus
# streamlit
psutil==7.0.0
# via -r requirements/requirements.in
ptyprocess==0.7.0
# via pexpect
pyarrow==19.0.1
# via streamlit
pycodestyle==2.12.1
pyasn1==0.6.1
# via
# pyasn1-modules
# rsa
pyasn1-modules==0.4.2
# via google-auth
pycodestyle==2.13.0
# via flake8
pycparser==2.22
# via cffi
pydantic==2.10.6
pydantic==2.11.3
# via
# banks
# litellm
# llama-index-core
# openai
pydantic-core==2.27.2
pydantic-core==2.33.1
# via pydantic
pydeck==0.9.1
# via streamlit
@@ -319,13 +374,13 @@ pydub==0.25.1
# via -r requirements/requirements.in
pyee==12.1.1
# via playwright
pyflakes==3.2.0
pyflakes==3.3.2
# via flake8
pygments==2.19.1
# via rich
pypandoc==1.15
# via -r requirements/requirements.in
pyparsing==3.2.1
pyparsing==3.2.3
# via matplotlib
pyperclip==1.9.0
# via -r requirements/requirements.in
@@ -341,12 +396,13 @@ pytest-env==1.1.5
# via -r requirements/requirements-dev.in
python-dateutil==2.9.0.post0
# via
# google-cloud-bigquery
# matplotlib
# pandas
# posthog
python-dotenv==1.0.1
python-dotenv==1.1.0
# via litellm
pytz==2025.1
pytz==2025.2
# via pandas
pyyaml==6.0.2
# via
@@ -366,6 +422,8 @@ regex==2024.11.6
# transformers
requests==2.32.3
# via
# google-api-core
# google-cloud-bigquery
# huggingface-hub
# llama-index-core
# mixpanel
@@ -373,14 +431,16 @@ requests==2.32.3
# streamlit
# tiktoken
# transformers
rich==13.9.4
rich==14.0.0
# via
# -r requirements/requirements.in
# typer
rpds-py==0.23.1
rpds-py==0.24.0
# via
# jsonschema
# referencing
rsa==4.9
# via google-auth
safetensors==0.5.3
# via transformers
scikit-learn==1.6.1
@@ -392,9 +452,9 @@ scipy==1.13.1
# sentence-transformers
semver==3.0.4
# via -r requirements/requirements-dev.in
sentence-transformers==3.4.1
sentence-transformers==4.0.2
# via llama-index-embeddings-huggingface
setuptools==76.0.0
setuptools==78.1.0
# via pip-tools
shellingham==1.5.4
# via typer
@@ -417,13 +477,13 @@ soundfile==0.13.1
# via -r requirements/requirements.in
soupsieve==2.6
# via beautifulsoup4
sqlalchemy[asyncio]==2.0.39
sqlalchemy[asyncio]==2.0.40
# via llama-index-core
streamlit==1.43.2
streamlit==1.44.1
# via -r requirements/requirements-browser.in
sympy==1.13.3
# via torch
tenacity==9.0.0
tenacity==9.1.2
# via
# llama-index-core
# streamlit
@@ -453,7 +513,7 @@ tqdm==4.67.1
# openai
# sentence-transformers
# transformers
transformers==4.49.0
transformers==4.51.2
# via sentence-transformers
tree-sitter==0.24.0
# via tree-sitter-language-pack
@@ -461,13 +521,13 @@ tree-sitter-c-sharp==0.23.1
# via tree-sitter-language-pack
tree-sitter-embedded-template==0.23.2
# via tree-sitter-language-pack
tree-sitter-language-pack==0.6.1
tree-sitter-language-pack==0.7.1
# via grep-ast
tree-sitter-yaml==0.7.0
# via tree-sitter-language-pack
typer==0.15.2
# via -r requirements/requirements-dev.in
typing-extensions==4.12.2
typing-extensions==4.13.2
# via
# altair
# anyio
@@ -479,26 +539,30 @@ typing-extensions==4.12.2
# pydantic-core
# pyee
# referencing
# sentence-transformers
# sqlalchemy
# streamlit
# torch
# typer
# typing-inspect
# typing-inspection
typing-inspect==0.9.0
# via
# dataclasses-json
# llama-index-core
tzdata==2025.1
typing-inspection==0.4.0
# via pydantic
tzdata==2025.2
# via pandas
urllib3==2.3.0
urllib3==2.4.0
# via
# mixpanel
# requests
uv==0.6.6
uv==0.6.14
# via -r requirements/requirements-dev.in
virtualenv==20.29.3
virtualenv==20.30.0
# via pre-commit
watchfiles==1.0.4
watchfiles==1.0.5
# via -r requirements/requirements.in
wcwidth==0.2.13
# via prompt-toolkit
@@ -508,7 +572,7 @@ wrapt==1.17.2
# via
# deprecated
# llama-index-core
yarl==1.18.3
yarl==1.19.0
# via aiohttp
zipp==3.21.0
# via importlib-metadata

View File

@@ -58,7 +58,7 @@ markupsafe==3.0.2
# via
# -c requirements/common-constraints.txt
# jinja2
narwhals==1.31.0
narwhals==1.34.1
# via
# -c requirements/common-constraints.txt
# altair
@@ -81,7 +81,7 @@ pillow==11.1.0
# via
# -c requirements/common-constraints.txt
# streamlit
protobuf==5.29.3
protobuf==5.29.4
# via
# -c requirements/common-constraints.txt
# streamlit
@@ -97,7 +97,7 @@ python-dateutil==2.9.0.post0
# via
# -c requirements/common-constraints.txt
# pandas
pytz==2025.1
pytz==2025.2
# via
# -c requirements/common-constraints.txt
# pandas
@@ -110,7 +110,7 @@ requests==2.32.3
# via
# -c requirements/common-constraints.txt
# streamlit
rpds-py==0.23.1
rpds-py==0.24.0
# via
# -c requirements/common-constraints.txt
# jsonschema
@@ -123,11 +123,11 @@ smmap==5.0.2
# via
# -c requirements/common-constraints.txt
# gitdb
streamlit==1.43.2
streamlit==1.44.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-browser.in
tenacity==9.0.0
tenacity==9.1.2
# via
# -c requirements/common-constraints.txt
# streamlit
@@ -139,17 +139,17 @@ tornado==6.4.2
# via
# -c requirements/common-constraints.txt
# streamlit
typing-extensions==4.12.2
typing-extensions==4.13.2
# via
# -c requirements/common-constraints.txt
# altair
# referencing
# streamlit
tzdata==2025.1
tzdata==2025.2
# via
# -c requirements/common-constraints.txt
# pandas
urllib3==2.3.0
urllib3==2.4.0
# via
# -c requirements/common-constraints.txt
# requests

View File

@@ -11,3 +11,4 @@ cogapp
semver
codespell
uv
google-cloud-bigquery

View File

@@ -4,10 +4,22 @@ build==1.2.2.post1
# via
# -c requirements/common-constraints.txt
# pip-tools
cachetools==5.5.2
# via
# -c requirements/common-constraints.txt
# google-auth
certifi==2025.1.31
# via
# -c requirements/common-constraints.txt
# requests
cfgv==3.4.0
# via
# -c requirements/common-constraints.txt
# pre-commit
charset-normalizer==3.4.1
# via
# -c requirements/common-constraints.txt
# requests
click==8.1.8
# via
# -c requirements/common-constraints.txt
@@ -42,19 +54,64 @@ filelock==3.18.0
# via
# -c requirements/common-constraints.txt
# virtualenv
fonttools==4.56.0
fonttools==4.57.0
# via
# -c requirements/common-constraints.txt
# matplotlib
google-api-core[grpc]==2.24.2
# via
# -c requirements/common-constraints.txt
# google-cloud-bigquery
# google-cloud-core
google-auth==2.38.0
# via
# -c requirements/common-constraints.txt
# google-api-core
# google-cloud-bigquery
# google-cloud-core
google-cloud-bigquery==3.31.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
google-cloud-core==2.4.3
# via
# -c requirements/common-constraints.txt
# google-cloud-bigquery
google-crc32c==1.7.1
# via
# -c requirements/common-constraints.txt
# google-resumable-media
google-resumable-media==2.7.2
# via
# -c requirements/common-constraints.txt
# google-cloud-bigquery
googleapis-common-protos==1.69.2
# via
# -c requirements/common-constraints.txt
# google-api-core
# grpcio-status
grpcio==1.71.0
# via
# -c requirements/common-constraints.txt
# google-api-core
# grpcio-status
grpcio-status==1.71.0
# via
# -c requirements/common-constraints.txt
# google-api-core
identify==2.6.9
# via
# -c requirements/common-constraints.txt
# pre-commit
idna==3.10
# via
# -c requirements/common-constraints.txt
# requests
imgcat==0.6.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
iniconfig==2.0.0
iniconfig==2.1.0
# via
# -c requirements/common-constraints.txt
# pytest
@@ -96,6 +153,7 @@ packaging==24.2
# via
# -c requirements/common-constraints.txt
# build
# google-cloud-bigquery
# matplotlib
# pytest
pandas==2.2.3
@@ -118,7 +176,7 @@ pip-tools==7.4.1
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
platformdirs==4.3.6
platformdirs==4.3.7
# via
# -c requirements/common-constraints.txt
# virtualenv
@@ -134,15 +192,35 @@ ppft==1.7.6.9
# via
# -c requirements/common-constraints.txt
# pathos
pre-commit==4.1.0
pre-commit==4.2.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
proto-plus==1.26.1
# via
# -c requirements/common-constraints.txt
# google-api-core
protobuf==5.29.4
# via
# -c requirements/common-constraints.txt
# google-api-core
# googleapis-common-protos
# grpcio-status
# proto-plus
pyasn1==0.6.1
# via
# -c requirements/common-constraints.txt
# pyasn1-modules
# rsa
pyasn1-modules==0.4.2
# via
# -c requirements/common-constraints.txt
# google-auth
pygments==2.19.1
# via
# -c requirements/common-constraints.txt
# rich
pyparsing==3.2.1
pyparsing==3.2.3
# via
# -c requirements/common-constraints.txt
# matplotlib
@@ -163,9 +241,10 @@ pytest-env==1.1.5
python-dateutil==2.9.0.post0
# via
# -c requirements/common-constraints.txt
# google-cloud-bigquery
# matplotlib
# pandas
pytz==2025.1
pytz==2025.2
# via
# -c requirements/common-constraints.txt
# pandas
@@ -173,15 +252,24 @@ pyyaml==6.0.2
# via
# -c requirements/common-constraints.txt
# pre-commit
rich==13.9.4
requests==2.32.3
# via
# -c requirements/common-constraints.txt
# google-api-core
# google-cloud-bigquery
rich==14.0.0
# via
# -c requirements/common-constraints.txt
# typer
rsa==4.9
# via
# -c requirements/common-constraints.txt
# google-auth
semver==3.0.4
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
setuptools==76.0.0
setuptools==78.1.0
# via
# -c requirements/common-constraints.txt
# pip-tools
@@ -197,19 +285,23 @@ typer==0.15.2
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
typing-extensions==4.12.2
typing-extensions==4.13.2
# via
# -c requirements/common-constraints.txt
# typer
tzdata==2025.1
tzdata==2025.2
# via
# -c requirements/common-constraints.txt
# pandas
uv==0.6.6
urllib3==2.4.0
# via
# -c requirements/common-constraints.txt
# requests
uv==0.6.14
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-dev.in
virtualenv==20.29.3
virtualenv==20.30.0
# via
# -c requirements/common-constraints.txt
# pre-commit

View File

@@ -1,4 +1,3 @@
llama-index-core
llama-index-embeddings-huggingface
# Because sentence-transformers doesn't like >=2
@@ -6,4 +5,7 @@ numpy<2
# Mac x86 only supports 2.2.2
# https://discuss.pytorch.org/t/why-no-macosx-x86-64-build-after-torch-2-2-2-cp39-none-macosx-10-9-x86-64-whl/204546/2
torch==2.2.2
torch==2.2.2
# Later versions break test_help in GitHub Actions on Windows and Ubuntu
llama-index-core==0.12.26

View File

@@ -4,7 +4,7 @@ aiohappyeyeballs==2.6.1
# via
# -c requirements/common-constraints.txt
# aiohttp
aiohttp==3.11.14
aiohttp==3.11.16
# via
# -c requirements/common-constraints.txt
# huggingface-hub
@@ -25,6 +25,10 @@ attrs==25.3.0
# via
# -c requirements/common-constraints.txt
# aiohttp
banks==2.1.1
# via
# -c requirements/common-constraints.txt
# llama-index-core
certifi==2025.1.31
# via
# -c requirements/common-constraints.txt
@@ -39,6 +43,10 @@ click==8.1.8
# via
# -c requirements/common-constraints.txt
# nltk
colorama==0.4.6
# via
# -c requirements/common-constraints.txt
# griffe
dataclasses-json==0.6.7
# via
# -c requirements/common-constraints.txt
@@ -46,6 +54,7 @@ dataclasses-json==0.6.7
deprecated==1.2.18
# via
# -c requirements/common-constraints.txt
# banks
# llama-index-core
dirtyjson==1.0.8
# via
@@ -66,7 +75,7 @@ frozenlist==1.5.0
# -c requirements/common-constraints.txt
# aiohttp
# aiosignal
fsspec==2025.3.0
fsspec==2025.3.2
# via
# -c requirements/common-constraints.txt
# huggingface-hub
@@ -76,11 +85,15 @@ greenlet==3.1.1
# via
# -c requirements/common-constraints.txt
# sqlalchemy
griffe==1.7.2
# via
# -c requirements/common-constraints.txt
# banks
h11==0.14.0
# via
# -c requirements/common-constraints.txt
# httpcore
httpcore==1.0.7
httpcore==1.0.8
# via
# -c requirements/common-constraints.txt
# httpx
@@ -88,7 +101,7 @@ httpx==0.28.1
# via
# -c requirements/common-constraints.txt
# llama-index-core
huggingface-hub[inference]==0.29.3
huggingface-hub[inference]==0.30.2
# via
# -c requirements/common-constraints.txt
# llama-index-embeddings-huggingface
@@ -105,18 +118,19 @@ idna==3.10
jinja2==3.1.6
# via
# -c requirements/common-constraints.txt
# banks
# torch
joblib==1.4.2
# via
# -c requirements/common-constraints.txt
# nltk
# scikit-learn
llama-index-core==0.12.24.post1
llama-index-core==0.12.26
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-help.in
# llama-index-embeddings-huggingface
llama-index-embeddings-huggingface==0.5.2
llama-index-embeddings-huggingface==0.5.3
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-help.in
@@ -132,7 +146,7 @@ mpmath==1.3.0
# via
# -c requirements/common-constraints.txt
# sympy
multidict==6.2.0
multidict==6.4.3
# via
# -c requirements/common-constraints.txt
# aiohttp
@@ -173,16 +187,21 @@ pillow==11.1.0
# -c requirements/common-constraints.txt
# llama-index-core
# sentence-transformers
propcache==0.3.0
platformdirs==4.3.7
# via
# -c requirements/common-constraints.txt
# banks
propcache==0.3.1
# via
# -c requirements/common-constraints.txt
# aiohttp
# yarl
pydantic==2.10.6
pydantic==2.11.3
# via
# -c requirements/common-constraints.txt
# banks
# llama-index-core
pydantic-core==2.27.2
pydantic-core==2.33.1
# via
# -c requirements/common-constraints.txt
# pydantic
@@ -218,7 +237,7 @@ scipy==1.13.1
# -c requirements/common-constraints.txt
# scikit-learn
# sentence-transformers
sentence-transformers==3.4.1
sentence-transformers==4.0.2
# via
# -c requirements/common-constraints.txt
# llama-index-embeddings-huggingface
@@ -226,7 +245,7 @@ sniffio==1.3.1
# via
# -c requirements/common-constraints.txt
# anyio
sqlalchemy[asyncio]==2.0.39
sqlalchemy[asyncio]==2.0.40
# via
# -c requirements/common-constraints.txt
# llama-index-core
@@ -234,7 +253,7 @@ sympy==1.13.3
# via
# -c requirements/common-constraints.txt
# torch
tenacity==9.0.0
tenacity==9.1.2
# via
# -c requirements/common-constraints.txt
# llama-index-core
@@ -263,11 +282,11 @@ tqdm==4.67.1
# nltk
# sentence-transformers
# transformers
transformers==4.49.0
transformers==4.51.2
# via
# -c requirements/common-constraints.txt
# sentence-transformers
typing-extensions==4.12.2
typing-extensions==4.13.2
# via
# -c requirements/common-constraints.txt
# anyio
@@ -275,15 +294,21 @@ typing-extensions==4.12.2
# llama-index-core
# pydantic
# pydantic-core
# sentence-transformers
# sqlalchemy
# torch
# typing-inspect
# typing-inspection
typing-inspect==0.9.0
# via
# -c requirements/common-constraints.txt
# dataclasses-json
# llama-index-core
urllib3==2.3.0
typing-inspection==0.4.0
# via
# -c requirements/common-constraints.txt
# pydantic
urllib3==2.4.0
# via
# -c requirements/common-constraints.txt
# requests
@@ -292,7 +317,7 @@ wrapt==1.17.2
# -c requirements/common-constraints.txt
# deprecated
# llama-index-core
yarl==1.18.3
yarl==1.19.0
# via
# -c requirements/common-constraints.txt
# aiohttp

View File

@@ -4,7 +4,7 @@ greenlet==3.1.1
# via
# -c requirements/common-constraints.txt
# playwright
playwright==1.50.0
playwright==1.51.0
# via
# -c requirements/common-constraints.txt
# -r requirements/requirements-playwright.in
@@ -12,7 +12,7 @@ pyee==12.1.1
# via
# -c requirements/common-constraints.txt
# playwright
typing-extensions==4.12.2
typing-extensions==4.13.2
# via
# -c requirements/common-constraints.txt
# pyee

235
scripts/30k-image.py Normal file
View File

@@ -0,0 +1,235 @@
#!/usr/bin/env python
"""
Generate a celebratory SVG image for Aider reaching 30,000 GitHub stars.
This creates a shareable social media graphic with confetti animation.
"""
import argparse
import base64
import math
import os
import random
from pathlib import Path
# Default colors for the celebration image
AIDER_GREEN = "#14b014"  # aider's signature terminal green (headline/logo)
AIDER_BLUE = "#4C6EF5"
DARK_COLOR = "#212529"
LIGHT_COLOR = "#F8F9FA"  # used for tagline/footer text
GOLD_COLOR = "#f1c40f"  # used for the "30,000 GitHub stars!" headline

# Default dimensions for social sharing
# 1200x630 is a common share-card aspect ratio — presumably chosen for
# Open Graph previews; confirm against where the image is embedded.
DEFAULT_WIDTH = 1200
DEFAULT_HEIGHT = 630
def embed_font():
    """Return the GlassTTYVT220 font as a base64-encoded string.

    The TTF file is looked up under ``aider/website/assets`` relative to the
    repository root (two levels up from this script). If the file is missing,
    a warning is printed and an empty string is returned so the caller can
    simply omit the @font-face rule and fall back to a system font.
    """
    assets_dir = Path(__file__).parent.parent / "aider" / "website" / "assets"
    font_path = assets_dir / "Glass_TTY_VT220.ttf"

    if not font_path.exists():
        # Degrade gracefully instead of raising — the SVG still renders.
        print(f"Warning: Font file not found at {font_path}")
        return ""

    # Read the raw TTF bytes and encode them for a data: URI.
    font_bytes = font_path.read_bytes()
    return base64.b64encode(font_bytes).decode("utf-8")
def generate_confetti(count=150, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT):
    """Generate SVG confetti elements for the celebration.

    Produces up to ``count`` animated confetti pieces (squares, circles and
    5-point stars) scattered over the canvas, skipping two rectangular "safe
    zones" so confetti never overlaps the headline text or the footer URL.

    Notes:
    - Uses the module-level ``random`` generator without seeding, so output
      is nondeterministic between runs.
    - Placement attempts are capped at ``count * 3``; fewer than ``count``
      pieces may be returned if many attempts land in the safe zones.

    :param count: target number of confetti pieces
    :param width: canvas width in pixels
    :param height: canvas height in pixels
    :return: newline-joined string of SVG element fragments
    """
    confetti = []
    colors = [AIDER_GREEN, AIDER_BLUE, GOLD_COLOR, "#e74c3c", "#9b59b6", "#3498db", "#2ecc71"]

    # Define text safe zones
    # Main content safe zone (centered area)
    safe_zone_x_min = width * 0.2
    safe_zone_x_max = width * 0.8
    safe_zone_y_min = height * 0.25
    safe_zone_y_max = height * 0.75

    # Footer safe zone (for GitHub URL)
    footer_safe_zone_x_min = width * 0.25
    footer_safe_zone_x_max = width * 0.75
    footer_safe_zone_y_min = height - 100  # 100px from bottom
    footer_safe_zone_y_max = height  # Bottom of image

    # Keep trying until we have enough confetti pieces (bounded by count * 3
    # attempts so a large safe zone cannot cause an infinite loop)
    attempts = 0
    confetti_count = 0

    while confetti_count < count and attempts < count * 3:
        attempts += 1

        # Generate random position
        x = random.randint(0, width)
        y = random.randint(0, height)

        # Skip if the position is in either of the safe zones
        if (
            (safe_zone_x_min < x < safe_zone_x_max) and (safe_zone_y_min < y < safe_zone_y_max)
        ) or (
            (footer_safe_zone_x_min < x < footer_safe_zone_x_max)
            and (footer_safe_zone_y_min < y < footer_safe_zone_y_max)
        ):
            continue

        confetti_count += 1
        size = random.randint(5, 15)
        color = random.choice(colors)
        rotation = random.randint(0, 360)
        # Randomized start delay and speed so pieces don't move in lockstep.
        delay = random.uniform(0, 2)
        duration = random.uniform(1, 3)

        # Randomly choose between rect (square), circle, and star shapes
        shape_type = random.choice(["rect", "circle", "star"])

        if shape_type == "rect":
            shape = f"""<rect x="{x}" y="{y}" width="{size}" height="{size}" fill="{color}"
                transform="rotate({rotation}, {x + size/2}, {y + size/2})">
                <animate attributeName="opacity" from="1" to="0" dur="{duration}s" begin="{delay}s" repeatCount="indefinite" />
                <animate attributeName="y" from="{y}" to="{y + random.randint(200, 400)}" dur="{duration}s" begin="{delay}s" repeatCount="indefinite" />
            </rect>"""
        elif shape_type == "circle":
            shape = f"""<circle cx="{x}" cy="{y}" r="{size/2}" fill="{color}">
                <animate attributeName="opacity" from="1" to="0" dur="{duration}s" begin="{delay}s" repeatCount="indefinite" />
                <animate attributeName="cy" from="{y}" to="{y + random.randint(200, 400)}" dur="{duration}s" begin="{delay}s" repeatCount="indefinite" />
            </circle>"""
        else:  # star
            # Create a simple 5-point star: alternate outer and inner vertices
            points = []
            for j in range(5):
                angle = j * 2 * 3.14159 / 5
                x_point = x + (size * 0.5) * math.cos(angle)
                y_point = y + (size * 0.5) * math.sin(angle)
                points.append(f"{x_point},{y_point}")

                # Inner points of the star (half-step between outer points)
                inner_angle = angle + 3.14159 / 5
                inner_x = x + (size * 0.2) * math.cos(inner_angle)
                inner_y = y + (size * 0.2) * math.sin(inner_angle)
                points.append(f"{inner_x},{inner_y}")

            points_str = " ".join(points)
            shape = f"""<polygon points="{points_str}" fill="{color}"
                transform="rotate({rotation}, {x}, {y})">
                <animate attributeName="opacity" from="1" to="0" dur="{duration}s" begin="{delay}s" repeatCount="indefinite" />
                <animate attributeName="transform" from="rotate({rotation}, {x}, {y})" to="rotate({rotation + 360}, {x}, {y})" dur="{duration*2}s" begin="{delay}s" repeatCount="indefinite" />
                <animate attributeName="cy" from="{y}" to="{y + random.randint(200, 400)}" dur="{duration}s" begin="{delay}s" repeatCount="indefinite" />
            </polygon>"""

        confetti.append(shape)

    return "\n".join(confetti)
def generate_celebration_svg(output_path=None, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT):
    """Generate a celebratory SVG for 30K GitHub stars.

    Assembles the full SVG document: an embedded @font-face rule (when the
    font file is available), a gradient background clipped to rounded
    corners, animated confetti, and the headline/tagline/footer text.

    :param output_path: if given, the SVG is also written to this path
    :param width: image width in pixels
    :param height: image height in pixels
    :return: the complete SVG document as a string
    """
    # Font embedding — empty string disables the @font-face rule entirely
    font_data = embed_font()
    font_face = f"""
        @font-face {{
            font-family: 'GlassTTYVT220';
            src: url(data:font/truetype;charset=utf-8;base64,{font_data}) format('truetype');
            font-weight: normal;
            font-style: normal;
        }}
    """ if font_data else ""

    # Generate confetti elements
    confetti = generate_confetti(count=150, width=width, height=height)

    # Create the SVG content
    svg_content = f"""<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg width="{width}" height="{height}" viewBox="0 0 {width} {height}" xmlns="http://www.w3.org/2000/svg">
    <defs>
        <filter id="glow" x="-20%" y="-20%" width="140%" height="140%">
            <feGaussianBlur stdDeviation="10" result="blur" />
            <feComponentTransfer in="blur" result="glow">
                <feFuncA type="linear" slope="0.7" intercept="0" />
            </feComponentTransfer>
            <feComposite in="SourceGraphic" in2="glow" operator="over" />
        </filter>
        <linearGradient id="bg-gradient" x1="0%" y1="0%" x2="100%" y2="100%">
            <stop offset="0%" style="stop-color:#121212" />
            <stop offset="100%" style="stop-color:#212529" />
        </linearGradient>
        <clipPath id="rounded-rect">
            <rect x="0" y="0" width="{width}" height="{height}" rx="20" ry="20" />
        </clipPath>
    </defs>
    <style>
        {font_face}
        .main-bg {{ fill: url(#bg-gradient); }}
        .aider-logo {{ font-family: 'GlassTTYVT220', monospace; font-size: 120px; fill: {AIDER_GREEN}; text-anchor: middle; filter: url(#glow); }}
        .stars-text {{ font-family: 'GlassTTYVT220', monospace; font-size: 72px; fill: {GOLD_COLOR}; text-anchor: middle; filter: url(#glow); }}
        .tagline {{ font-family: sans-serif; font-size: 32px; fill: {LIGHT_COLOR}; text-anchor: middle; }}
        .footer {{ font-family: sans-serif; font-size: 24px; fill: {LIGHT_COLOR}; text-anchor: middle; opacity: 0.8; }}
    </style>
    <g clip-path="url(#rounded-rect)">
        <!-- Background with pattern -->
        <rect class="main-bg" x="0" y="0" width="{width}" height="{height}" />
        <!-- Pattern overlay -->
        <rect width="{width}" height="{height}" fill="url(#bg-gradient)" opacity="0.9" />
        <!-- Confetti animation -->
        {confetti}
        <!-- Main content -->
        <text x="{width/2}" y="{height/2 - 100}" class="aider-logo">aider</text>
        <text x="{width/2}" y="{height/2 + 20}" class="stars-text">30,000 GitHub stars!</text>
        <text x="{width/2}" y="{height/2 + 100}" class="tagline">Thank you to our amazing community!</text>
        <text x="{width/2}" y="{height - 50}" class="footer">github.com/Aider-AI/aider</text>
    </g>
</svg>
"""

    # Write to file if output path is specified
    if output_path:
        with open(output_path, "w") as f:
            f.write(svg_content)
        print(f"Celebration SVG saved to {output_path}")

    return svg_content
if __name__ == "__main__":
    # CLI entry point: parse output path and canvas dimensions, then render.
    cli = argparse.ArgumentParser(
        description="Generate a celebration SVG for Aider's 30K GitHub stars"
    )
    cli.add_argument(
        "--output",
        "-o",
        type=str,
        default="aider-30k-stars.svg",
        help="Output file path (default: aider-30k-stars.svg)",
    )
    cli.add_argument(
        "--width",
        "-w",
        type=int,
        default=DEFAULT_WIDTH,
        help=f"Image width in pixels (default: {DEFAULT_WIDTH})",
    )
    cli.add_argument(
        "--height",
        "-ht",
        type=int,
        default=DEFAULT_HEIGHT,
        help=f"Image height in pixels (default: {DEFAULT_HEIGHT})",
    )
    opts = cli.parse_args()

    # Render the SVG (and write it, since --output always has a value).
    generate_celebration_svg(opts.output, opts.width, opts.height)

View File

@@ -288,7 +288,7 @@ src="https://img.shields.io/github/stars/Aider-AI/aider?style=flat-square&logo=g
src="https://img.shields.io/badge/📦%20Installs-{downloads_formatted}-2ecc71?style=flat-square&labelColor=555555"/></a>
<img alt="Tokens per week" title="{TOKENS_WEEKLY_TOOLTIP}"
src="https://img.shields.io/badge/📈%20Tokens%2Fweek-{TOKENS_PER_WEEK}-3498db?style=flat-square&labelColor=555555"/>
<a href="https://openrouter.ai/"><img alt="OpenRouter Ranking" title="{OPENROUTER_TOOLTIP}"
<a href="https://openrouter.ai/#options-menu"><img alt="OpenRouter Ranking" title="{OPENROUTER_TOOLTIP}"
src="https://img.shields.io/badge/🏆%20OpenRouter-Top%2020-9b59b6?style=flat-square&labelColor=555555"/></a>
<a href="https://aider.chat/HISTORY.html"><img alt="Singularity" title="{SINGULARITY_TOOLTIP}"
src="https://img.shields.io/badge/🔄%20Singularity-{aider_percent_rounded}%25-e74c3c?style=flat-square&labelColor=555555"/></a>""" # noqa
@@ -398,7 +398,7 @@ def get_badges_html():
<span class="badge-label">📈 Tokens/week</span>
<span class="badge-value">{TOKENS_PER_WEEK}</span>
</div>
<a href="https://openrouter.ai/" class="github-badge badge-router" title="{OPENROUTER_TOOLTIP}">
<a href="https://openrouter.ai/#options-menu" class="github-badge badge-router" title="{OPENROUTER_TOOLTIP}">
<span class="badge-label">🏆 OpenRouter</span>
<span class="badge-value">Top 20</span>
</a>
@@ -410,6 +410,123 @@ def get_badges_html():
return html
def get_testimonials_js():
    """
    Extract testimonials from README.md and format them as a JavaScript array.

    Scans the "## Kind Words From Users" section of the repository README for
    bullet lines of the form ``- *"quote"* — [author](link)`` (em-dash or
    plain-dash, with or without a markdown link) and renders them as a
    ``<script>`` block that defines a ``testimonials`` array of
    ``{text, author, link}`` objects.

    Returns a script block with an empty array if the section is missing, no
    quotes parse, or the README cannot be read — callers always get valid JS.
    """
    # Path to README.md, relative to this script
    readme_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "README.md"
    )

    testimonials = []
    in_testimonials_section = False

    try:
        with open(readme_path, "r", encoding="utf-8") as f:
            lines = f.readlines()

        # Find the testimonials section
        for i, line in enumerate(lines):
            if line.strip() == "## Kind Words From Users":
                in_testimonials_section = True
                # Start processing from the next line
                start_idx = i + 1
                break

        # If we found the section
        if in_testimonials_section:
            for i in range(start_idx, len(lines)):
                line = lines[i]

                # If we've hit another section, stop
                if line.startswith("##"):
                    break

                # Process testimonial lines
                if line.strip().startswith('- *"'):
                    try:
                        # Get the full line
                        full_line = line.strip()

                        # Extract the quote text between *" and "*
                        if '*"' in full_line and '"*' in full_line:
                            quote_parts = full_line.split('*"')
                            if len(quote_parts) > 1:
                                quote_text = quote_parts[1].split('"*')[0].strip()

                                # Default values
                                author = "Anonymous"
                                link = ""

                                # Try to extract author and link if they exist.
                                # Check for the em dash format first: "— [author](link)"
                                if "— [" in full_line and "](" in full_line:
                                    author_parts = full_line.split("— [")
                                    if len(author_parts) > 1:
                                        author = author_parts[1].split("]")[0].strip()

                                        # Extract the link if it exists
                                        link_parts = full_line.split("](")
                                        if len(link_parts) > 1:
                                            link = link_parts[1].split(")")[0].strip()
                                # Check for regular dash format: "- [author](link)"
                                elif " - [" in full_line and "](" in full_line:
                                    author_parts = full_line.split(" - [")
                                    if len(author_parts) > 1:
                                        author = author_parts[1].split("]")[0].strip()

                                        # Extract the link if it exists
                                        link_parts = full_line.split("](")
                                        if len(link_parts) > 1:
                                            link = link_parts[1].split(")")[0].strip()
                                # Check for em dash without link: "— author"
                                # BUG FIX: the separator here had degraded to an
                                # empty string, which made this branch match every
                                # line and then raise ValueError from split("").
                                elif "—" in full_line:
                                    # Format without a link, just plain text author
                                    author_parts = full_line.split("—")
                                    if len(author_parts) > 1:
                                        author = author_parts[1].strip()
                                # Check for regular dash without link: "- author"
                                elif " - " in full_line:
                                    # Format without a link, just plain text author
                                    author_parts = full_line.split(" - ")
                                    if len(author_parts) > 1:
                                        author = author_parts[1].strip()

                                testimonials.append(
                                    {"text": quote_text, "author": author, "link": link}
                                )
                    except Exception as e:
                        print(
                            f"Error parsing testimonial line: {line}. Error: {e}",
                            file=sys.stderr,
                        )
                        continue

        # Format as JavaScript array with script tags
        if not testimonials:
            print("No testimonials found in README.md", file=sys.stderr)
            return "<script>\nconst testimonials = [];\n</script>"

        js_array = "<script>\nconst testimonials = [\n"
        for i, t in enumerate(testimonials):
            # Escape backslashes and double quotes so the generated JS string
            # literals stay well-formed even when a quote contains them.
            text = t["text"].replace("\\", "\\\\").replace('"', '\\"')
            author = t["author"].replace("\\", "\\\\").replace('"', '\\"')
            link = t["link"].replace("\\", "\\\\").replace('"', '\\"')
            js_array += " {\n"
            js_array += f' text: "{text}",\n'
            js_array += f' author: "{author}",\n'
            js_array += f' link: "{link}"\n'
            js_array += " }"
            if i < len(testimonials) - 1:
                js_array += ","
            js_array += "\n"

        js_array += "];\n</script>"

        return js_array
    except Exception as e:
        print(f"Error reading testimonials from README: {e}", file=sys.stderr)
        # Return empty array as fallback
        return "<script>\nconst testimonials = [];\n</script>"
def main():
# Load environment variables from .env file
load_dotenv()
@@ -492,6 +609,11 @@ def main():
percentage, version = get_latest_release_aider_percentage()
print(f"Aider wrote {percentage:.2f}% of code in the LATEST release ({version})")
# Get testimonials JavaScript
testimonials_js = get_testimonials_js()
print("\nTestimonials JavaScript:")
print(testimonials_js)
if __name__ == "__main__":
main()

View File

@@ -81,15 +81,20 @@ def main():
parser.add_argument(
"--dry-run", action="store_true", help="Print each step without actually executing them"
)
parser.add_argument("--force", action="store_true", help="Skip pre-push checks")
args = parser.parse_args()
dry_run = args.dry_run
force = args.force
# Perform checks before proceeding
check_branch()
check_working_directory_clean()
check_main_branch_up_to_date()
check_ok_to_push()
# Perform checks before proceeding unless --force is used
if not force:
check_branch()
check_working_directory_clean()
check_main_branch_up_to_date()
check_ok_to_push()
else:
print("Skipping pre-push checks due to --force flag.")
new_version_str = args.new_version
if not re.match(r"^\d+\.\d+\.\d+$", new_version_str):

View File

@@ -194,8 +194,8 @@ class TestCoder(unittest.TestCase):
mock.return_value = set([str(fname1), str(fname2), str(fname3)])
coder.repo.get_tracked_files = mock
# Check that file mentions skip files with duplicate basenames
mentioned = coder.get_file_mentions(f"Check {fname2} and {fname3}")
# Check that file mentions of a pure basename skips files with duplicate basenames
mentioned = coder.get_file_mentions(f"Check {fname2.name} and {fname3}")
self.assertEqual(mentioned, {str(fname3)})
# Add a read-only file with same basename
@@ -366,6 +366,45 @@ class TestCoder(unittest.TestCase):
f"Failed to extract mentions from: {content}",
)
def test_get_file_mentions_multiline_backticks(self):
    """Backticked file paths listed on separate lines are all detected as mentions."""
    with GitTemporaryDirectory():
        io = InputOutput(pretty=False, yes=True)
        coder = Coder.create(self.GPT35, None, io)

        # Create test files on disk so the paths are real relative files
        test_files = [
            "swebench/harness/test_spec/python.py",
            "swebench/harness/test_spec/javascript.py",
        ]
        for fname in test_files:
            fpath = Path(fname)
            fpath.parent.mkdir(parents=True, exist_ok=True)
            fpath.touch()

        # Mock get_addable_relative_files to return our test files
        coder.get_addable_relative_files = MagicMock(return_value=set(test_files))

        # Input text with multiline backticked filenames (one per list item)
        content = """
Could you please **add the following files to the chat**?

1. `swebench/harness/test_spec/python.py`
2. `swebench/harness/test_spec/javascript.py`

Once I have these, I can show you precisely how to do the thing.
"""

        expected_mentions = {
            "swebench/harness/test_spec/python.py",
            "swebench/harness/test_spec/javascript.py",
        }

        mentioned_files = coder.get_file_mentions(content)

        self.assertEqual(
            mentioned_files,
            expected_mentions,
            f"Failed to extract mentions from multiline backticked content: {content}",
        )
def test_get_file_mentions_path_formats(self):
with GitTemporaryDirectory():
io = InputOutput(pretty=False, yes=True)

View File

@@ -1124,6 +1124,29 @@ class TestCommands(TestCase):
# Check that the output was added to cur_messages
self.assertTrue(any("exit 1" in msg["content"] for msg in coder.cur_messages))
def test_cmd_test_returns_output_on_failure(self):
    """A failing /test command should return its output and record it in the chat."""
    with ChdirTemporaryDirectory():
        io = InputOutput(pretty=False, fancy_input=False, yes=False)
        from aider.coders import Coder

        coder = Coder.create(self.GPT35, None, io)
        commands = Commands(io, coder)

        # Define a command that prints to stderr and exits with non-zero status
        test_cmd = "echo 'error output' >&2 && exit 1"
        expected_output_fragment = "error output"

        # Run cmd_test
        result = commands.cmd_test(test_cmd)

        # Assert that the result contains the expected output
        self.assertIsNotNone(result)
        self.assertIn(expected_output_fragment, result)

        # Check that the output was also added to cur_messages
        self.assertTrue(
            any(expected_output_fragment in msg["content"] for msg in coder.cur_messages)
        )
def test_cmd_add_drop_untracked_files(self):
with GitTemporaryDirectory():
repo = git.Repo()
@@ -1682,6 +1705,27 @@ class TestCommands(TestCase):
self.assertEqual(
context.exception.kwargs.get("main_model").weak_model.name, self.GPT35.weak_model.name
)
# Check that the edit format is updated to the new model's default
self.assertEqual(context.exception.kwargs.get("edit_format"), "diff")
def test_cmd_model_preserves_explicit_edit_format(self):
    """/model must keep a user-chosen edit format rather than adopting the new model's default."""
    io = InputOutput(pretty=False, fancy_input=False, yes=True)
    coder = Coder.create(self.GPT35, None, io)
    # Explicitly set edit format to something other than the model default
    coder.edit_format = "udiff"
    commands = Commands(io, coder)

    # Mock sanity check to avoid network calls
    with mock.patch("aider.models.sanity_check_models"):
        # Switching models raises SwitchCoder carrying the new configuration
        with self.assertRaises(SwitchCoder) as context:
            commands.cmd_model("gpt-4")

    # Check that the SwitchCoder exception contains the correct model configuration
    self.assertEqual(context.exception.kwargs.get("main_model").name, "gpt-4")
    # Check that the explicitly chosen edit format is preserved
    self.assertEqual(context.exception.kwargs.get("edit_format"), "udiff")
def test_cmd_editor_model(self):
io = InputOutput(pretty=False, fancy_input=False, yes=True)
@@ -1716,6 +1760,25 @@ class TestCommands(TestCase):
)
self.assertEqual(context.exception.kwargs.get("main_model").weak_model.name, "gpt-4")
def test_cmd_model_updates_default_edit_format(self):
    """/model adopts the new model's default edit format when none was set explicitly."""
    io = InputOutput(pretty=False, fancy_input=False, yes=True)
    coder = Coder.create(self.GPT35, None, io)
    # Ensure current edit format is the model's own default (not user-chosen)
    self.assertEqual(coder.edit_format, self.GPT35.edit_format)
    commands = Commands(io, coder)

    # Mock sanity check to avoid network calls
    with mock.patch("aider.models.sanity_check_models"):
        # Switching models raises SwitchCoder carrying the new configuration
        with self.assertRaises(SwitchCoder) as context:
            commands.cmd_model("gpt-4")

    # Check that the SwitchCoder exception contains the correct model configuration
    self.assertEqual(context.exception.kwargs.get("main_model").name, "gpt-4")
    # Check that the edit format is updated to the new model's default
    self.assertEqual(context.exception.kwargs.get("edit_format"), "diff")
def test_cmd_ask(self):
io = InputOutput(pretty=False, fancy_input=False, yes=True)
coder = Coder.create(self.GPT35, None, io)

View File

@@ -63,3 +63,22 @@ def test_context_window_error():
)
ex_info = ex.get_ex_info(ctx_error)
assert ex_info.retry is False
def test_openrouter_error():
    """OpenRouter 'choices' connection errors must be retryable with a helpful description."""
    ex = LiteLLMExceptions()

    from litellm import APIConnectionError

    # Create an APIConnectionError with OpenrouterException message
    openrouter_error = APIConnectionError(
        message="APIConnectionError: OpenrouterException - 'choices'",
        model="openrouter/model",
        llm_provider="openrouter",
    )

    ex_info = ex.get_ex_info(openrouter_error)
    # Transient OpenRouter failures should be retried...
    assert ex_info.retry is True
    # ...and the user-facing description should mention the provider and
    # the likely causes (overload / rate limiting).
    assert "OpenRouter" in ex_info.description
    assert "overloaded" in ex_info.description
    assert "rate" in ex_info.description

View File

@@ -447,6 +447,34 @@ class TestInputOutputMultilineMode(unittest.TestCase):
self.assertEqual(ensure_hash_prefix("xyz"), "xyz") # Invalid hex chars
self.assertEqual(ensure_hash_prefix("12345g"), "12345g") # Invalid hex chars
def test_tool_output_color_handling(self):
    """Test that tool_output correctly handles hex colors without # prefix"""
    from unittest.mock import patch

    # NOTE(review): Text appears unused in this test body — confirm before removing.
    from rich.text import Text

    # Create IO with hex color without # for tool_output_color
    io = InputOutput(tool_output_color="FFA500", pretty=True)

    # Patch console.print to avoid actual printing
    with patch.object(io.console, "print") as mock_print:
        # This would raise ColorParseError without the fix
        io.tool_output("Test message")

        # Verify the call was made without error
        mock_print.assert_called_once()

        # Verify the style was correctly created with # prefix
        # The first argument is the message, second would be the style
        # NOTE(review): only the *presence* of a "style" kwarg is asserted;
        # the "#" prefix itself is never inspected — consider asserting on
        # the style value to fully cover the fix.
        kwargs = mock_print.call_args.kwargs
        self.assertIn("style", kwargs)

    # Test with other hex color
    io = InputOutput(tool_output_color="00FF00", pretty=True)
    with patch.object(io.console, "print") as mock_print:
        io.tool_output("Test message")
        mock_print.assert_called_once()
if __name__ == "__main__":
unittest.main()

View File

@@ -14,7 +14,7 @@ from prompt_toolkit.output import DummyOutput
from aider.coders import Coder
from aider.dump import dump # noqa: F401
from aider.io import InputOutput
from aider.main import check_gitignore, main, setup_git
from aider.main import check_gitignore, load_dotenv_files, main, setup_git
from aider.utils import GitTemporaryDirectory, IgnorantTemporaryDirectory, make_repo
@@ -983,7 +983,7 @@ class TestMain(TestCase):
coder = main(
["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
)
self.assertIn("openrouter/anthropic/claude", coder.main_model.name.lower())
self.assertIn("openrouter/", coder.main_model.name.lower())
del os.environ["OPENROUTER_API_KEY"]
# Test OpenAI API key
@@ -999,12 +999,15 @@ class TestMain(TestCase):
coder = main(
["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
)
self.assertIn("flash", coder.main_model.name.lower())
self.assertIn("gemini", coder.main_model.name.lower())
del os.environ["GEMINI_API_KEY"]
# Test no API keys
result = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput())
self.assertEqual(result, 1)
# Test no API keys - should offer OpenRouter OAuth
with patch("aider.onboarding.offer_openrouter_oauth") as mock_offer_oauth:
mock_offer_oauth.return_value = None # Simulate user declining or failure
result = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput())
self.assertEqual(result, 1) # Expect failure since no model could be selected
mock_offer_oauth.assert_called_once()
def test_model_precedence(self):
with GitTemporaryDirectory():
@@ -1246,3 +1249,99 @@ class TestMain(TestCase):
# Only set_reasoning_effort should be called, not set_thinking_tokens
mock_instance.set_reasoning_effort.assert_called_once_with("3")
mock_instance.set_thinking_tokens.assert_not_called()
@patch("aider.main.InputOutput")
def test_stream_and_cache_warning(self, MockInputOutput):
    """Using --stream together with --cache-prompts should warn that cost
    estimates may be inaccurate."""
    mock_io_instance = MockInputOutput.return_value
    with GitTemporaryDirectory():
        main(
            ["--stream", "--cache-prompts", "--exit", "--yes"],
            input=DummyInput(),
            output=DummyOutput(),
        )
    # assert_called_with checks the *most recent* tool_warning call — assumes
    # this warning is the last one emitted during startup; TODO confirm.
    mock_io_instance.tool_warning.assert_called_with(
        "Cost estimates may be inaccurate when using streaming and caching."
    )
@patch("aider.main.InputOutput")
def test_stream_without_cache_no_warning(self, MockInputOutput):
    """Streaming without prompt caching must not emit the cost-estimate warning."""
    io_mock = MockInputOutput.return_value
    with GitTemporaryDirectory():
        main(
            ["--stream", "--exit", "--yes"],
            input=DummyInput(),
            output=DummyOutput(),
        )
    # Inspect every warning emitted during the run; none may be the
    # streaming+caching cost-estimate warning.
    emitted = [warning_call.args[0] for warning_call in io_mock.tool_warning.call_args_list]
    for message in emitted:
        self.assertNotIn("Cost estimates may be inaccurate", message)
def test_load_dotenv_files_override(self):
    """load_dotenv_files should load .env files in increasing priority order:
    ~/.aider/oauth-keys.env first, then the git-root .env, then the CWD .env,
    with later files overriding earlier ones.
    """
    with GitTemporaryDirectory() as git_dir:
        git_dir = Path(git_dir)

        # Create fake home and .aider directory
        fake_home = git_dir / "fake_home"
        fake_home.mkdir()
        aider_dir = fake_home / ".aider"
        aider_dir.mkdir()

        # Create oauth keys file (lowest priority)
        oauth_keys_file = aider_dir / "oauth-keys.env"
        oauth_keys_file.write_text("OAUTH_VAR=oauth_val\nSHARED_VAR=oauth_shared\n")

        # Create git root .env file (middle priority)
        git_root_env = git_dir / ".env"
        git_root_env.write_text("GIT_VAR=git_val\nSHARED_VAR=git_shared\n")

        # Create CWD .env file in a subdir (highest priority)
        cwd_subdir = git_dir / "subdir"
        cwd_subdir.mkdir()
        cwd_env = cwd_subdir / ".env"
        cwd_env.write_text("CWD_VAR=cwd_val\nSHARED_VAR=cwd_shared\n")

        # Change to subdir so the CWD .env file is discovered
        original_cwd = os.getcwd()
        os.chdir(cwd_subdir)

        # Clear relevant env vars before the test
        test_vars = ["OAUTH_VAR", "SHARED_VAR", "GIT_VAR", "CWD_VAR"]
        for var in test_vars:
            os.environ.pop(var, None)

        try:
            with patch("pathlib.Path.home", return_value=fake_home):
                loaded_files = load_dotenv_files(str(git_dir), None)

            # Assert files were loaded in expected order (oauth first)
            self.assertIn(str(oauth_keys_file.resolve()), loaded_files)
            self.assertIn(str(git_root_env.resolve()), loaded_files)
            self.assertIn(str(cwd_env.resolve()), loaded_files)
            self.assertLess(
                loaded_files.index(str(oauth_keys_file.resolve())),
                loaded_files.index(str(git_root_env.resolve())),
            )
            self.assertLess(
                loaded_files.index(str(git_root_env.resolve())),
                loaded_files.index(str(cwd_env.resolve())),
            )

            # Assert environment variables reflect the override order
            self.assertEqual(os.environ.get("OAUTH_VAR"), "oauth_val")
            self.assertEqual(os.environ.get("GIT_VAR"), "git_val")
            self.assertEqual(os.environ.get("CWD_VAR"), "cwd_val")
            # SHARED_VAR should be overridden by the last loaded file (cwd .env)
            self.assertEqual(os.environ.get("SHARED_VAR"), "cwd_shared")
        finally:
            # Fix: always restore the CWD and scrub the env vars this test
            # sets, even when an assertion fails, so other tests in the run
            # are not polluted by a leaked working directory or environment.
            os.chdir(original_cwd)
            for var in test_vars:
                os.environ.pop(var, None)
@patch("aider.main.InputOutput")
def test_cache_without_stream_no_warning(self, MockInputOutput):
    """Prompt caching with streaming disabled must not emit the cost-estimate warning."""
    io_mock = MockInputOutput.return_value
    with GitTemporaryDirectory():
        main(
            ["--cache-prompts", "--exit", "--yes", "--no-stream"],
            input=DummyInput(),
            output=DummyOutput(),
        )
    # No warning emitted during the run may mention inaccurate cost estimates.
    for warning_call in io_mock.tool_warning.call_args_list:
        message = warning_call.args[0]
        self.assertNotIn("Cost estimates may be inaccurate", message)

View File

@@ -0,0 +1,439 @@
import argparse
import base64
import hashlib
import os
import unittest
from unittest.mock import MagicMock, patch
import requests
# Import the functions to be tested
from aider.onboarding import (
check_openrouter_tier,
exchange_code_for_key,
find_available_port,
generate_pkce_codes,
offer_openrouter_oauth,
select_default_model,
try_to_select_default_model,
)
# Stand-in for aider's Analytics used by the onboarding functions:
# accepts any event signature and records nothing.
class DummyAnalytics:
    def event(self, *args, **kwargs):
        """No-op event recorder; accepts and discards any arguments."""
        return None
# Stand-in for aider's InputOutput: swallows all output and, by default,
# declines every confirmation prompt.
class DummyIO:
    def tool_output(self, *args, **kwargs):
        """Discard normal output."""

    def tool_warning(self, *args, **kwargs):
        """Discard warnings."""

    def tool_error(self, *args, **kwargs):
        """Discard errors."""

    def confirm_ask(self, *args, **kwargs):
        """Decline every confirmation prompt."""
        return False

    def offer_url(self, *args, **kwargs):
        """Discard offered URLs."""
class TestOnboarding(unittest.TestCase):
    """Tests for aider.onboarding helpers.

    Covers: OpenRouter tier detection, automatic default-model selection from
    environment API keys, local-port discovery, PKCE code generation, the
    OAuth code-for-key exchange, and the interactive OpenRouter OAuth offer.
    All HTTP and socket activity is mocked; nothing here touches the network.
    """

    # --- check_openrouter_tier ---

    @patch("requests.get")
    def test_check_openrouter_tier_free(self, mock_get):
        """Test check_openrouter_tier identifies free tier."""
        mock_response = MagicMock()
        mock_response.json.return_value = {"data": {"is_free_tier": True}}
        mock_response.raise_for_status.return_value = None
        mock_get.return_value = mock_response
        self.assertTrue(check_openrouter_tier("fake_key"))
        mock_get.assert_called_once_with(
            "https://openrouter.ai/api/v1/auth/key",
            headers={"Authorization": "Bearer fake_key"},
            timeout=5,
        )

    @patch("requests.get")
    def test_check_openrouter_tier_paid(self, mock_get):
        """Test check_openrouter_tier identifies paid tier."""
        mock_response = MagicMock()
        mock_response.json.return_value = {"data": {"is_free_tier": False}}
        mock_response.raise_for_status.return_value = None
        mock_get.return_value = mock_response
        self.assertFalse(check_openrouter_tier("fake_key"))

    @patch("requests.get")
    def test_check_openrouter_tier_api_error(self, mock_get):
        """Test check_openrouter_tier defaults to free on API error."""
        mock_get.side_effect = requests.exceptions.RequestException("API Error")
        self.assertTrue(check_openrouter_tier("fake_key"))

    @patch("requests.get")
    def test_check_openrouter_tier_missing_key(self, mock_get):
        """Test check_openrouter_tier defaults to free if key is missing in response."""
        mock_response = MagicMock()
        mock_response.json.return_value = {"data": {}}  # Missing 'is_free_tier'
        mock_response.raise_for_status.return_value = None
        mock_get.return_value = mock_response
        self.assertTrue(check_openrouter_tier("fake_key"))

    # --- try_to_select_default_model: one test per provider key, plus priority ---

    @patch("aider.onboarding.check_openrouter_tier")
    @patch.dict(os.environ, {}, clear=True)
    def test_try_select_default_model_no_keys(self, mock_check_tier):
        """Test no model is selected when no keys are present."""
        self.assertIsNone(try_to_select_default_model())
        mock_check_tier.assert_not_called()

    @patch("aider.onboarding.check_openrouter_tier", return_value=True)  # Assume free tier
    @patch.dict(os.environ, {"OPENROUTER_API_KEY": "or_key"}, clear=True)
    def test_try_select_default_model_openrouter_free(self, mock_check_tier):
        """Test OpenRouter free model selection."""
        self.assertEqual(
            try_to_select_default_model(), "openrouter/google/gemini-2.5-pro-exp-03-25:free"
        )
        mock_check_tier.assert_called_once_with("or_key")

    @patch("aider.onboarding.check_openrouter_tier", return_value=False)  # Assume paid tier
    @patch.dict(os.environ, {"OPENROUTER_API_KEY": "or_key"}, clear=True)
    def test_try_select_default_model_openrouter_paid(self, mock_check_tier):
        """Test OpenRouter paid model selection."""
        self.assertEqual(try_to_select_default_model(), "openrouter/anthropic/claude-3.7-sonnet")
        mock_check_tier.assert_called_once_with("or_key")

    @patch("aider.onboarding.check_openrouter_tier")
    @patch.dict(os.environ, {"ANTHROPIC_API_KEY": "an_key"}, clear=True)
    def test_try_select_default_model_anthropic(self, mock_check_tier):
        """Test Anthropic model selection."""
        self.assertEqual(try_to_select_default_model(), "sonnet")
        mock_check_tier.assert_not_called()

    @patch("aider.onboarding.check_openrouter_tier")
    @patch.dict(os.environ, {"DEEPSEEK_API_KEY": "ds_key"}, clear=True)
    def test_try_select_default_model_deepseek(self, mock_check_tier):
        """Test Deepseek model selection."""
        self.assertEqual(try_to_select_default_model(), "deepseek")
        mock_check_tier.assert_not_called()

    @patch("aider.onboarding.check_openrouter_tier")
    @patch.dict(os.environ, {"OPENAI_API_KEY": "oa_key"}, clear=True)
    def test_try_select_default_model_openai(self, mock_check_tier):
        """Test OpenAI model selection."""
        self.assertEqual(try_to_select_default_model(), "gpt-4o")
        mock_check_tier.assert_not_called()

    @patch("aider.onboarding.check_openrouter_tier")
    @patch.dict(os.environ, {"GEMINI_API_KEY": "gm_key"}, clear=True)
    def test_try_select_default_model_gemini(self, mock_check_tier):
        """Test Gemini model selection."""
        self.assertEqual(try_to_select_default_model(), "gemini/gemini-2.5-pro-exp-03-25")
        mock_check_tier.assert_not_called()

    @patch("aider.onboarding.check_openrouter_tier")
    @patch.dict(os.environ, {"VERTEXAI_PROJECT": "vx_proj"}, clear=True)
    def test_try_select_default_model_vertex(self, mock_check_tier):
        """Test Vertex AI model selection."""
        self.assertEqual(try_to_select_default_model(), "vertex_ai/gemini-2.5-pro-exp-03-25")
        mock_check_tier.assert_not_called()

    @patch("aider.onboarding.check_openrouter_tier", return_value=False)  # Paid
    @patch.dict(
        os.environ, {"OPENROUTER_API_KEY": "or_key", "OPENAI_API_KEY": "oa_key"}, clear=True
    )
    def test_try_select_default_model_priority_openrouter(self, mock_check_tier):
        """Test OpenRouter key takes priority."""
        self.assertEqual(try_to_select_default_model(), "openrouter/anthropic/claude-3.7-sonnet")
        mock_check_tier.assert_called_once_with("or_key")

    @patch("aider.onboarding.check_openrouter_tier")
    @patch.dict(os.environ, {"ANTHROPIC_API_KEY": "an_key", "OPENAI_API_KEY": "oa_key"}, clear=True)
    def test_try_select_default_model_priority_anthropic(self, mock_check_tier):
        """Test Anthropic key takes priority over OpenAI."""
        self.assertEqual(try_to_select_default_model(), "sonnet")
        mock_check_tier.assert_not_called()

    # --- find_available_port ---

    @patch("socketserver.TCPServer")
    def test_find_available_port_success(self, mock_tcp_server):
        """Test finding an available port."""
        # Simulate port 8484 being available
        mock_tcp_server.return_value.__enter__.return_value = None  # Allow context manager
        port = find_available_port(start_port=8484, end_port=8484)
        self.assertEqual(port, 8484)
        mock_tcp_server.assert_called_once_with(("localhost", 8484), None)

    @patch("socketserver.TCPServer")
    def test_find_available_port_in_use(self, mock_tcp_server):
        """Test finding the next available port if the first is in use."""
        # Simulate port 8484 raising OSError, 8485 being available
        mock_tcp_server.side_effect = [OSError, MagicMock()]
        # NOTE(review): with side_effect set, return_value is not consumed by
        # the mock; this next line is redundant but harmless.
        mock_tcp_server.return_value.__enter__.return_value = None  # Allow context manager
        port = find_available_port(start_port=8484, end_port=8485)
        self.assertEqual(port, 8485)
        self.assertEqual(mock_tcp_server.call_count, 2)
        mock_tcp_server.assert_any_call(("localhost", 8484), None)
        mock_tcp_server.assert_any_call(("localhost", 8485), None)

    @patch("socketserver.TCPServer", side_effect=OSError)
    def test_find_available_port_none_available(self, mock_tcp_server):
        """Test returning None if no ports are available in the range."""
        port = find_available_port(start_port=8484, end_port=8485)
        self.assertIsNone(port)
        self.assertEqual(mock_tcp_server.call_count, 2)  # Tried 8484 and 8485

    def test_generate_pkce_codes(self):
        """Test PKCE code generation."""
        verifier, challenge = generate_pkce_codes()
        self.assertIsInstance(verifier, str)
        self.assertIsInstance(challenge, str)
        self.assertGreater(len(verifier), 40)  # Check reasonable length
        self.assertGreater(len(challenge), 40)
        # Verify the challenge is the SHA256 hash of the verifier, base64 encoded
        # (S256 transform: urlsafe base64 with '=' padding stripped)
        hasher = hashlib.sha256()
        hasher.update(verifier.encode("utf-8"))
        expected_challenge = base64.urlsafe_b64encode(hasher.digest()).rstrip(b"=").decode("utf-8")
        self.assertEqual(challenge, expected_challenge)

    # --- exchange_code_for_key: success plus each failure mode ---

    @patch("requests.post")
    def test_exchange_code_for_key_success(self, mock_post):
        """Test successful code exchange for API key."""
        mock_response = MagicMock()
        mock_response.json.return_value = {"key": "test_api_key"}
        mock_response.raise_for_status.return_value = None
        mock_post.return_value = mock_response
        io_mock = DummyIO()
        api_key = exchange_code_for_key("auth_code", "verifier", io_mock)
        self.assertEqual(api_key, "test_api_key")
        mock_post.assert_called_once_with(
            "https://openrouter.ai/api/v1/auth/keys",
            headers={"Content-Type": "application/json"},
            json={
                "code": "auth_code",
                "code_verifier": "verifier",
                "code_challenge_method": "S256",
            },
            timeout=30,
        )

    @patch("requests.post")
    def test_exchange_code_for_key_missing_key(self, mock_post):
        """Test code exchange when 'key' is missing in response."""
        mock_response = MagicMock()
        mock_response.json.return_value = {"other_data": "value"}  # Missing 'key'
        mock_response.raise_for_status.return_value = None
        mock_response.text = '{"other_data": "value"}'
        mock_post.return_value = mock_response
        io_mock = DummyIO()
        io_mock.tool_error = MagicMock()  # Track error output
        api_key = exchange_code_for_key("auth_code", "verifier", io_mock)
        self.assertIsNone(api_key)
        io_mock.tool_error.assert_any_call("Error: 'key' not found in OpenRouter response.")
        io_mock.tool_error.assert_any_call('Response: {"other_data": "value"}')

    @patch("requests.post")
    def test_exchange_code_for_key_http_error(self, mock_post):
        """Test code exchange with HTTP error."""
        mock_response = MagicMock()
        mock_response.status_code = 400
        mock_response.reason = "Bad Request"
        mock_response.text = '{"error": "invalid_code"}'
        http_error = requests.exceptions.HTTPError(response=mock_response)
        mock_post.side_effect = http_error
        io_mock = DummyIO()
        io_mock.tool_error = MagicMock()
        api_key = exchange_code_for_key("auth_code", "verifier", io_mock)
        self.assertIsNone(api_key)
        io_mock.tool_error.assert_any_call(
            "Error exchanging code for OpenRouter key: 400 Bad Request"
        )
        io_mock.tool_error.assert_any_call('Response: {"error": "invalid_code"}')

    @patch("requests.post")
    def test_exchange_code_for_key_timeout(self, mock_post):
        """Test code exchange with timeout."""
        mock_post.side_effect = requests.exceptions.Timeout("Timeout")
        io_mock = DummyIO()
        io_mock.tool_error = MagicMock()
        api_key = exchange_code_for_key("auth_code", "verifier", io_mock)
        self.assertIsNone(api_key)
        io_mock.tool_error.assert_called_once_with(
            "Error: Request to OpenRouter timed out during code exchange."
        )

    @patch("requests.post")
    def test_exchange_code_for_key_request_exception(self, mock_post):
        """Test code exchange with general request exception."""
        req_exception = requests.exceptions.RequestException("Network Error")
        mock_post.side_effect = req_exception
        io_mock = DummyIO()
        io_mock.tool_error = MagicMock()
        api_key = exchange_code_for_key("auth_code", "verifier", io_mock)
        self.assertIsNone(api_key)
        io_mock.tool_error.assert_called_once_with(
            f"Error exchanging code for OpenRouter key: {req_exception}"
        )

    # --- Tests for select_default_model ---

    @patch("aider.onboarding.try_to_select_default_model", return_value="gpt-4o")
    @patch("aider.onboarding.offer_openrouter_oauth")
    def test_select_default_model_already_specified(self, mock_offer_oauth, mock_try_select):
        """Test select_default_model returns args.model if provided."""
        args = argparse.Namespace(model="specific-model")
        io_mock = DummyIO()
        analytics_mock = DummyAnalytics()
        selected_model = select_default_model(args, io_mock, analytics_mock)
        self.assertEqual(selected_model, "specific-model")
        mock_try_select.assert_not_called()
        mock_offer_oauth.assert_not_called()

    @patch("aider.onboarding.try_to_select_default_model", return_value="gpt-4o")
    @patch("aider.onboarding.offer_openrouter_oauth")
    def test_select_default_model_found_via_env(self, mock_offer_oauth, mock_try_select):
        """Test select_default_model returns model found by try_to_select."""
        args = argparse.Namespace(model=None)  # No model specified
        io_mock = DummyIO()
        io_mock.tool_warning = MagicMock()  # Track warnings
        analytics_mock = DummyAnalytics()
        analytics_mock.event = MagicMock()  # Track events
        selected_model = select_default_model(args, io_mock, analytics_mock)
        self.assertEqual(selected_model, "gpt-4o")
        mock_try_select.assert_called_once()
        io_mock.tool_warning.assert_called_once_with(
            "Using gpt-4o model with API key from environment."
        )
        analytics_mock.event.assert_called_once_with("auto_model_selection", model="gpt-4o")
        mock_offer_oauth.assert_not_called()

    @patch(
        "aider.onboarding.try_to_select_default_model", side_effect=[None, None]
    )  # Fails first, fails after oauth attempt
    @patch(
        "aider.onboarding.offer_openrouter_oauth", return_value=False
    )  # OAuth offered but fails/declined
    def test_select_default_model_no_keys_oauth_fail(self, mock_offer_oauth, mock_try_select):
        """Test select_default_model offers OAuth when no keys, but OAuth fails."""
        args = argparse.Namespace(model=None)
        io_mock = DummyIO()
        io_mock.tool_warning = MagicMock()
        io_mock.offer_url = MagicMock()
        analytics_mock = DummyAnalytics()
        selected_model = select_default_model(args, io_mock, analytics_mock)
        self.assertIsNone(selected_model)
        self.assertEqual(mock_try_select.call_count, 2)  # Called before and after oauth attempt
        mock_offer_oauth.assert_called_once_with(io_mock, analytics_mock)
        io_mock.tool_warning.assert_called_once_with(
            "No LLM model was specified and no API keys were provided."
        )
        io_mock.offer_url.assert_called_once()  # Should offer docs URL

    @patch(
        "aider.onboarding.try_to_select_default_model",
        side_effect=[None, "openrouter/google/gemini-2.5-pro-exp-03-25:free"],
    )  # Fails first, succeeds after oauth
    @patch(
        "aider.onboarding.offer_openrouter_oauth", return_value=True
    )  # OAuth offered and succeeds
    def test_select_default_model_no_keys_oauth_success(self, mock_offer_oauth, mock_try_select):
        """Test select_default_model offers OAuth, which succeeds."""
        args = argparse.Namespace(model=None)
        io_mock = DummyIO()
        io_mock.tool_warning = MagicMock()
        analytics_mock = DummyAnalytics()
        selected_model = select_default_model(args, io_mock, analytics_mock)
        self.assertEqual(selected_model, "openrouter/google/gemini-2.5-pro-exp-03-25:free")
        self.assertEqual(mock_try_select.call_count, 2)  # Called before and after oauth
        mock_offer_oauth.assert_called_once_with(io_mock, analytics_mock)
        # Only one warning is expected: "No LLM model..."
        self.assertEqual(io_mock.tool_warning.call_count, 1)
        io_mock.tool_warning.assert_called_once_with(
            "No LLM model was specified and no API keys were provided."
        )
        # The second call to try_select finds the model, so the *outer* function logs the usage.
        # Note: The warning comes from the second call within select_default_model,
        # not try_select itself.
        # We verify the final state and model returned.

    # --- Tests for offer_openrouter_oauth ---

    @patch("aider.onboarding.start_openrouter_oauth_flow", return_value="new_or_key")
    @patch.dict(os.environ, {}, clear=True)  # Ensure no key exists initially
    def test_offer_openrouter_oauth_confirm_yes_success(self, mock_start_oauth):
        """Test offer_openrouter_oauth when user confirms and OAuth succeeds."""
        io_mock = DummyIO()
        io_mock.confirm_ask = MagicMock(return_value=True)  # User says yes
        analytics_mock = DummyAnalytics()
        analytics_mock.event = MagicMock()
        result = offer_openrouter_oauth(io_mock, analytics_mock)
        self.assertTrue(result)
        io_mock.confirm_ask.assert_called_once()
        mock_start_oauth.assert_called_once_with(io_mock, analytics_mock)
        self.assertEqual(os.environ.get("OPENROUTER_API_KEY"), "new_or_key")
        analytics_mock.event.assert_any_call("oauth_flow_initiated", provider="openrouter")
        analytics_mock.event.assert_any_call("oauth_flow_success")
        # Clean up env var
        # NOTE(review): patch.dict(clear=True) already restores os.environ on
        # exit, so this explicit del is redundant but harmless.
        del os.environ["OPENROUTER_API_KEY"]

    @patch("aider.onboarding.start_openrouter_oauth_flow", return_value=None)  # OAuth fails
    @patch.dict(os.environ, {}, clear=True)
    def test_offer_openrouter_oauth_confirm_yes_fail(self, mock_start_oauth):
        """Test offer_openrouter_oauth when user confirms but OAuth fails."""
        io_mock = DummyIO()
        io_mock.confirm_ask = MagicMock(return_value=True)  # User says yes
        io_mock.tool_error = MagicMock()
        analytics_mock = DummyAnalytics()
        analytics_mock.event = MagicMock()
        result = offer_openrouter_oauth(io_mock, analytics_mock)
        self.assertFalse(result)
        io_mock.confirm_ask.assert_called_once()
        mock_start_oauth.assert_called_once_with(io_mock, analytics_mock)
        self.assertNotIn("OPENROUTER_API_KEY", os.environ)
        io_mock.tool_error.assert_called_once_with(
            "OpenRouter authentication did not complete successfully."
        )
        analytics_mock.event.assert_any_call("oauth_flow_initiated", provider="openrouter")
        analytics_mock.event.assert_any_call("oauth_flow_failure")

    @patch("aider.onboarding.start_openrouter_oauth_flow")
    def test_offer_openrouter_oauth_confirm_no(self, mock_start_oauth):
        """Test offer_openrouter_oauth when user declines."""
        io_mock = DummyIO()
        io_mock.confirm_ask = MagicMock(return_value=False)  # User says no
        analytics_mock = DummyAnalytics()
        analytics_mock.event = MagicMock()
        result = offer_openrouter_oauth(io_mock, analytics_mock)
        self.assertFalse(result)
        io_mock.confirm_ask.assert_called_once()
        mock_start_oauth.assert_not_called()
        analytics_mock.event.assert_not_called()  # No OAuth events if declined

    # --- More complex test for start_openrouter_oauth_flow (simplified) ---
    # This test focuses on the successful path, mocking heavily
if __name__ == "__main__":
unittest.main()

View File

@@ -381,6 +381,9 @@ class TestRepoMapAllLanguages(unittest.TestCase):
def test_language_udev(self):
self._test_language_repo_map("udev", "rules", "USB_DRIVER")
def test_language_scala(self):
self._test_language_repo_map("scala", "scala", "Greeter")
def _test_language_repo_map(self, lang, key, symbol):
"""Helper method to test repo map generation for a specific language."""
# Get the fixture file path and name based on language

View File

@@ -155,3 +155,12 @@ def test_ai_comment_pattern():
assert (
question_js_has_bang == "?"
), "Expected at least one bang (!) comment in watch_question.js fixture"
# Test Lisp fixture
lisp_path = fixtures_dir / "watch.lisp"
lisp_lines, lisp_comments, lisp_has_bang = watcher.get_ai_comments(str(lisp_path))
lisp_expected = 7
assert (
len(lisp_lines) == lisp_expected
), f"Expected {lisp_expected} AI comments in Lisp fixture, found {len(lisp_lines)}"
assert lisp_has_bang == "!", "Expected at least one bang (!) comment in Lisp fixture"

View File

@@ -0,0 +1,61 @@
package com.example.test

// Fixture for the repo-map tests: exercises the Scala constructs the
// tree-sitter queries must recognize (trait, class, object, enum, generic
// class, case class, val/var/def/type members).
// NOTE(review): the repo-map test asserts on symbol names (e.g. Greeter);
// do not rename any definition in this file.

// A trait definition
trait Greeter {
  def greet(name: String): String
}

// A class definition with parameters
class FormalGreeter(prefix: String) extends Greeter {
  // A method definition
  override def greet(name: String): String = {
    s"$prefix, $name!"
  }

  // A val definition
  val defaultPrefix: String = "Hello"

  // A var definition
  var counter: Int = 0
}

// An object definition
object GreeterFactory {
  // A function definition
  def createGreeter(formal: Boolean): Greeter = {
    if (formal) {
      new FormalGreeter("Good day")
    } else {
      new CasualGreeter
    }
  }

  // A type definition
  type GreeterType = Greeter
}

// An enum definition (Scala 3 syntax)
enum Greeting {
  // Simple enum cases
  case Hello, Hi, Hey

  // Full enum case with parameters
  case Custom(text: String)
}

// A class that uses generics
class Container[T](val value: T) {
  def map[U](f: T => U): Container[U] = new Container(f(value))
}

// A case class
case class Person(name: String, age: Int) {
  def introduce(): String = {
    val greeter = GreeterFactory.createGreeter(age > 30)
    greeter.greet(name) + s" I am $age years old."
  }
}

// A plain subclass used by GreeterFactory above
class CasualGreeter extends Greeter {
  override def greet(name: String): String = s"Hey, $name!"
}

19
tests/fixtures/watch.lisp vendored Normal file
View File

@@ -0,0 +1,19 @@
(defun hello-world ()
;; ai this is a simple hello world function
(format t "Hello, World!"))
(defun add (a b)
; ai! fix this function to handle nil values
(+ a b))
(defun multiply (a b)
;;; ai? why is this function not working with large numbers?
(* a b))
; ai this is a single semicolon comment
;; ai this is a double semicolon comment
;;; ai this is a triple semicolon comment
;;;; ai! this is a quadruple semicolon comment