mirror of
https://github.com/Aider-AI/aider
synced 2026-04-26 01:25:17 +02:00
Compare commits
402 Commits
v0.74.3.de
...
v0.76.2.de
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3cb6ec9ddb | ||
|
|
c21619608e | ||
|
|
76a8789bc1 | ||
|
|
bbf538e06c | ||
|
|
d94ab3395b | ||
|
|
6d8457a61f | ||
|
|
303f8e1bc9 | ||
|
|
e638116a2f | ||
|
|
5bac9133e6 | ||
|
|
6fde4041ba | ||
|
|
d0c8b38ffc | ||
|
|
be7888ab18 | ||
|
|
11b71fa28c | ||
|
|
c838f9bfd5 | ||
|
|
8d073ce221 | ||
|
|
0be5d39453 | ||
|
|
74ecdf2d3f | ||
|
|
20eacfab0f | ||
|
|
0396e15a3b | ||
|
|
3432a936ea | ||
|
|
87cd2b5dfe | ||
|
|
313b91edbe | ||
|
|
a1f104cb4d | ||
|
|
eadb8d5d0a | ||
|
|
41ae947885 | ||
|
|
84f610c0e9 | ||
|
|
0df959cf68 | ||
|
|
a15d10ea1e | ||
|
|
a37d6e86df | ||
|
|
a405063385 | ||
|
|
f5a5b85e9d | ||
|
|
ba7d941e5b | ||
|
|
804a2d1af9 | ||
|
|
c1bc6e161e | ||
|
|
af1b728b90 | ||
|
|
14e37a82ab | ||
|
|
f8a7854efa | ||
|
|
072ce87051 | ||
|
|
cac9b4460e | ||
|
|
67bf90a149 | ||
|
|
af8558b19e | ||
|
|
1903542f11 | ||
|
|
3ed16fb796 | ||
|
|
6f99392eda | ||
|
|
680dbfbf77 | ||
|
|
51a72b497b | ||
|
|
d6e57dd194 | ||
|
|
e92ab55da6 | ||
|
|
c78b3e0204 | ||
|
|
ac1c05389a | ||
|
|
95583fe2cd | ||
|
|
ddedda9233 | ||
|
|
d30b9d1513 | ||
|
|
4c35f88ea0 | ||
|
|
e6623ae0a8 | ||
|
|
4755578822 | ||
|
|
319d543ac2 | ||
|
|
9e668cda7f | ||
|
|
5447483da2 | ||
|
|
8e22a8d107 | ||
|
|
18d27ab4e4 | ||
|
|
fe60832492 | ||
|
|
6bf683409f | ||
|
|
634bfb1eae | ||
|
|
c9d597d2b1 | ||
|
|
92c616f717 | ||
|
|
b1e8d29ae0 | ||
|
|
e0cef55fcd | ||
|
|
9aacf5c7db | ||
|
|
4858749a20 | ||
|
|
b53c0b982a | ||
|
|
2aac9ff9c5 | ||
|
|
bdaa70ada5 | ||
|
|
ca6abdfc61 | ||
|
|
e18593fe88 | ||
|
|
08401aff26 | ||
|
|
cddc67ad69 | ||
|
|
586af2a435 | ||
|
|
97091fab60 | ||
|
|
5f2cf75be8 | ||
|
|
37c7b81c95 | ||
|
|
779a266713 | ||
|
|
ebaedc6f05 | ||
|
|
08a392787a | ||
|
|
883bf74bad | ||
|
|
80de3335b7 | ||
|
|
794072bdf8 | ||
|
|
e28fdb9cb1 | ||
|
|
7873d1c6b3 | ||
|
|
f8c069132e | ||
|
|
c53833072f | ||
|
|
16d7cf7a52 | ||
|
|
3b9c2b9729 | ||
|
|
b230fea66f | ||
|
|
f9b6501af1 | ||
|
|
0cd8e3701d | ||
|
|
8545672839 | ||
|
|
4a6c4b95f1 | ||
|
|
c893bc21ab | ||
|
|
c8c5cbf8cc | ||
|
|
54ef8a1e19 | ||
|
|
82df218bcb | ||
|
|
f613ad6c05 | ||
|
|
4e732d0379 | ||
|
|
ad8b5c9d29 | ||
|
|
1ab4bf14dc | ||
|
|
068a0b4576 | ||
|
|
5f694f228f | ||
|
|
2ffe49130d | ||
|
|
f7d18ef976 | ||
|
|
8233eb6007 | ||
|
|
de4693cdf3 | ||
|
|
8fb235c3f5 | ||
|
|
6feb00dcd9 | ||
|
|
4fc1847a70 | ||
|
|
e7f16f07f7 | ||
|
|
d9551b3106 | ||
|
|
854428795b | ||
|
|
5c3b4bd987 | ||
|
|
9d686d3e52 | ||
|
|
b62e00b935 | ||
|
|
634745c818 | ||
|
|
490c6d9a28 | ||
|
|
e6dd9978cb | ||
|
|
240a5613a5 | ||
|
|
fb96cbcaaf | ||
|
|
b58f879db7 | ||
|
|
1585c6095e | ||
|
|
30a630412d | ||
|
|
e5ca79cd51 | ||
|
|
148353aca4 | ||
|
|
7a098ce740 | ||
|
|
f3b9831a0c | ||
|
|
01454674c8 | ||
|
|
5093b18ecc | ||
|
|
c16cfd0668 | ||
|
|
1674cd5db9 | ||
|
|
f111ab48fb | ||
|
|
65309854ac | ||
|
|
cf0aff8c40 | ||
|
|
16b768485a | ||
|
|
c2e7b533d3 | ||
|
|
539859f1ab | ||
|
|
d1d40a9a76 | ||
|
|
52162a5604 | ||
|
|
84e84207a5 | ||
|
|
a4e1745eca | ||
|
|
5931979b74 | ||
|
|
a412a65315 | ||
|
|
25da0674bb | ||
|
|
c823bf4fbb | ||
|
|
cfd0e67a6b | ||
|
|
94f3af57f1 | ||
|
|
0050a3fe6c | ||
|
|
0e65ddee37 | ||
|
|
101f7de889 | ||
|
|
a21c1ff92d | ||
|
|
f9bb2e498e | ||
|
|
f6bb803be5 | ||
|
|
204a88c171 | ||
|
|
012afc0708 | ||
|
|
cf089abb64 | ||
|
|
40e463cdc1 | ||
|
|
6a1284a5ca | ||
|
|
60522ee474 | ||
|
|
0045641db7 | ||
|
|
97b5b1b669 | ||
|
|
448de8519a | ||
|
|
95e1fe0446 | ||
|
|
47254be254 | ||
|
|
3da15bfd19 | ||
|
|
c79db2581b | ||
|
|
93b86a8800 | ||
|
|
56ba7ef411 | ||
|
|
e2117fd8a9 | ||
|
|
e817c76e38 | ||
|
|
65e059a7d2 | ||
|
|
f661025acc | ||
|
|
2fe1b1e16e | ||
|
|
665ffe3984 | ||
|
|
c3401047e0 | ||
|
|
996177ceaf | ||
|
|
09e998523f | ||
|
|
38e8d27416 | ||
|
|
813de04596 | ||
|
|
3c0eae4180 | ||
|
|
99424a9f53 | ||
|
|
51d118fdb5 | ||
|
|
a26509a1fd | ||
|
|
0db70379e8 | ||
|
|
c612b5d17b | ||
|
|
1b469cce49 | ||
|
|
c62cbd2d77 | ||
|
|
da1bc19052 | ||
|
|
8e2246ec5c | ||
|
|
5cf6945bcb | ||
|
|
7132ae47d7 | ||
|
|
96bde4ad03 | ||
|
|
85b9bdd8f4 | ||
|
|
e5a85108d7 | ||
|
|
aaa3a8ebda | ||
|
|
9ceb766a67 | ||
|
|
f894240fbb | ||
|
|
4bac8e2ebe | ||
|
|
d3ad1fd384 | ||
|
|
ed0e4189e4 | ||
|
|
5f147242be | ||
|
|
c7b4c22b94 | ||
|
|
667bacf81e | ||
|
|
e896b0ea96 | ||
|
|
3b0a5a8b41 | ||
|
|
81d39e9bde | ||
|
|
59eabf03a6 | ||
|
|
4fc4987c43 | ||
|
|
dbf5bb149a | ||
|
|
38acbf6970 | ||
|
|
ad4bd91751 | ||
|
|
1a6f290979 | ||
|
|
c6e02a620a | ||
|
|
90efaa41c2 | ||
|
|
51a73ad8b5 | ||
|
|
93f2387d1b | ||
|
|
207a631a65 | ||
|
|
74e60e98b7 | ||
|
|
6ca6bf7457 | ||
|
|
ea49cdeb17 | ||
|
|
bcc8b1917a | ||
|
|
67b12d4416 | ||
|
|
dc02daecee | ||
|
|
6212b38ea6 | ||
|
|
ce7e5726e7 | ||
|
|
1156b3f22e | ||
|
|
66097f3507 | ||
|
|
146f02d314 | ||
|
|
77e5882ce7 | ||
|
|
d44850a4f3 | ||
|
|
e6e692dc43 | ||
|
|
dc65770ae3 | ||
|
|
8c15802277 | ||
|
|
3d666d9929 | ||
|
|
c0c960ec2e | ||
|
|
2bb4db127c | ||
|
|
dd1a5d4f58 | ||
|
|
961fdf7029 | ||
|
|
ff3d2b006f | ||
|
|
d7efbad3df | ||
|
|
d70995bb1a | ||
|
|
1b58e95dce | ||
|
|
780f70d5c6 | ||
|
|
a01e1f96fa | ||
|
|
3adb443ca5 | ||
|
|
dc9ff3a004 | ||
|
|
f879f4f432 | ||
|
|
183f831a7e | ||
|
|
3c361be621 | ||
|
|
5764d44faf | ||
|
|
ce86677faa | ||
|
|
17d93b39d5 | ||
|
|
1357b85a3d | ||
|
|
c67cb5c604 | ||
|
|
6ffb0df6cb | ||
|
|
032b40c78d | ||
|
|
742aea115b | ||
|
|
0f16cd46f9 | ||
|
|
eea64cf272 | ||
|
|
3d5c5f8054 | ||
|
|
748099a324 | ||
|
|
9c1d050d8b | ||
|
|
4ef834e295 | ||
|
|
50bead172b | ||
|
|
ee4508af03 | ||
|
|
6638959d66 | ||
|
|
f266a9d25d | ||
|
|
6cb8e1a518 | ||
|
|
85375359ed | ||
|
|
17c9ba2c68 | ||
|
|
34334ad8b8 | ||
|
|
4527714094 | ||
|
|
b43d74dbb7 | ||
|
|
0c4140ff02 | ||
|
|
b074c02fa2 | ||
|
|
7636c97f9f | ||
|
|
4211ab28b0 | ||
|
|
cecfbc7e20 | ||
|
|
31a6aff932 | ||
|
|
c4a67c4356 | ||
|
|
9f5765134b | ||
|
|
0c5b51d2ac | ||
|
|
31c4198cee | ||
|
|
a94c4b4ce4 | ||
|
|
088dd99ec1 | ||
|
|
4f9b907b4d | ||
|
|
e7dc3e6062 | ||
|
|
53055e78eb | ||
|
|
9a9c34aa18 | ||
|
|
2f1384840c | ||
|
|
b462e55799 | ||
|
|
263ec60ba6 | ||
|
|
8d44a57200 | ||
|
|
976722c129 | ||
|
|
4a9447d344 | ||
|
|
ac2ed9aa87 | ||
|
|
51cf241dae | ||
|
|
f239b8e26d | ||
|
|
ab9f4161ea | ||
|
|
1d10e649b7 | ||
|
|
a95b40aac6 | ||
|
|
1b5777821f | ||
|
|
587d469193 | ||
|
|
6c3e30f3ea | ||
|
|
91dbcae9e2 | ||
|
|
fb5db4f6b7 | ||
|
|
a1e029a825 | ||
|
|
54dbf9b6f2 | ||
|
|
b6344951fe | ||
|
|
ea972118b5 | ||
|
|
2fccd4799d | ||
|
|
a3937e4d0d | ||
|
|
acb022d5d5 | ||
|
|
b6e46d6101 | ||
|
|
347f75f804 | ||
|
|
4005ced505 | ||
|
|
c748c35b37 | ||
|
|
a73836ca43 | ||
|
|
b357fab326 | ||
|
|
16a3000451 | ||
|
|
c4fac2d179 | ||
|
|
60d11a6eba | ||
|
|
93edbda984 | ||
|
|
75bd94d757 | ||
|
|
a5cf0b6ef5 | ||
|
|
506280d645 | ||
|
|
2f79b4fde7 | ||
|
|
846f98628d | ||
|
|
eed9be5a9e | ||
|
|
27c77afafc | ||
|
|
c156b2f817 | ||
|
|
96fcc5df6b | ||
|
|
3c775fd5de | ||
|
|
75e9ee6528 | ||
|
|
ea0ee96398 | ||
|
|
3fd4a2841a | ||
|
|
6ecf44c87a | ||
|
|
031e8cea6e | ||
|
|
757fbb0124 | ||
|
|
d65e3f73df | ||
|
|
5b13105d58 | ||
|
|
c8745afb37 | ||
|
|
85189c0bde | ||
|
|
a8635bade2 | ||
|
|
4560572ff2 | ||
|
|
c7fa57fd14 | ||
|
|
54965fdf2e | ||
|
|
30361aa685 | ||
|
|
8be93b72c4 | ||
|
|
c7e9d645e5 | ||
|
|
fe6a3c89f3 | ||
|
|
686a32cbc0 | ||
|
|
55d7397ff5 | ||
|
|
3714d554df | ||
|
|
0415de853b | ||
|
|
0ba1e8f904 | ||
|
|
58bfcb0953 | ||
|
|
fa281d89d2 | ||
|
|
908b10dae0 | ||
|
|
ea03f9def0 | ||
|
|
3510799fca | ||
|
|
1f4a63d6db | ||
|
|
dd94a444d2 | ||
|
|
50fafc9ff6 | ||
|
|
47fc6a689d | ||
|
|
86175a1827 | ||
|
|
6d6e25df4e | ||
|
|
5402ed112c | ||
|
|
235b83d02e | ||
|
|
6ffbec969a | ||
|
|
185ea71646 | ||
|
|
69fcc3acd7 | ||
|
|
da94cf4aab | ||
|
|
8799cf95b4 | ||
|
|
108ce18d51 | ||
|
|
f67ea5d010 | ||
|
|
dd857aeccf | ||
|
|
44b1acd385 | ||
|
|
b2f6018e05 | ||
|
|
bca6507f11 | ||
|
|
30332c2ba5 | ||
|
|
17919d7503 | ||
|
|
42237ced80 | ||
|
|
737021ccdf | ||
|
|
927b5bc8cc | ||
|
|
2a56d892d7 | ||
|
|
e3d5eaf388 | ||
|
|
5d1f50117b | ||
|
|
f6a2ec15d7 | ||
|
|
64a8d56725 | ||
|
|
71caea32e7 | ||
|
|
17993ef9ff | ||
|
|
b0aa4ef4c8 | ||
|
|
5c4aaa27d9 | ||
|
|
ee837889db | ||
|
|
a5f4cba72f |
58
HISTORY.md
58
HISTORY.md
@@ -1,5 +1,63 @@
|
||||
# Release history
|
||||
|
||||
### Aider v0.76.1
|
||||
|
||||
- Added ignore_permission_denied option to file watcher to prevent errors when accessing restricted files, by Yutaka Matsubara.
|
||||
- Aider wrote 0% of the code in this release.
|
||||
|
||||
### Aider v0.76.0
|
||||
|
||||
- Improved support for thinking/reasoningmodels:
|
||||
- Added `--thinking-tokens` CLI option to control token budget for models that support thinking.
|
||||
- Display thinking/reasoning content from LLMs which return it.
|
||||
- Enhanced handling of reasoning tags to better clean up model responses.
|
||||
- Added deprecation warning for `remove_reasoning` setting, now replaced by `reasoning_tag`.
|
||||
- Aider will notify you when it's completed the last request and needs your input:
|
||||
- Added [notifications when LLM responses are ready](https://aider.chat/docs/usage/notifications.html) with `--notifications` flag.
|
||||
- Specify desktop notification command with `--notifications-command`.
|
||||
- Added support for QWQ 32B.
|
||||
- Switch to `tree-sitter-language-pack` for tree sitter support.
|
||||
- Improved error handling for EOF (Ctrl+D) in user input prompts.
|
||||
- Added helper function to ensure hex color values have a # prefix.
|
||||
- Fixed handling of Git errors when reading staged files.
|
||||
- Improved SSL verification control for model information requests.
|
||||
- Improved empty LLM response handling with clearer warning messages.
|
||||
- Fixed Git identity retrieval to respect global configuration, by Akira Komamura.
|
||||
- Offer to install dependencies for Bedrock and Vertex AI models.
|
||||
- Deprecated model shortcut args (like --4o, --opus) in favor of the --model flag.
|
||||
- Aider wrote 85% of the code in this release.
|
||||
|
||||
### Aider v0.75.3
|
||||
|
||||
- Support for V3 free on OpenRouter: `--model openrouter/deepseek/deepseek-chat:free`.
|
||||
|
||||
### Aider v0.75.2
|
||||
|
||||
- Added support for Claude 3.7 Sonnet models on OpenRouter, Bedrock and Vertex AI.
|
||||
- Updated default model to Claude 3.7 Sonnet on OpenRouter.
|
||||
- Added support for GPT-4.5-preview model.
|
||||
- Added support for Claude 3.7 Sonnet:beta on OpenRouter.
|
||||
- Fixed weak_model_name patterns to match main model name patterns for some models.
|
||||
|
||||
### Aider v0.75.1
|
||||
|
||||
- Added support for `openrouter/anthropic/claude-3.7-sonnet`
|
||||
|
||||
### Aider v0.75.0
|
||||
|
||||
- Basic support for Claude 3.7 Sonnet
|
||||
- Use `--model sonnet` to use the new 3.7
|
||||
- Thinking support coming soon.
|
||||
- Bugfix to `/editor` command.
|
||||
- Aider wrote 46% of the code in this release.
|
||||
|
||||
### Aider v0.74.3
|
||||
|
||||
- Downgrade streamlit dependency to avoid threading bug.
|
||||
- Added support for tree-sitter language pack.
|
||||
- Added openrouter/o3-mini-high model configuration.
|
||||
- Added build.gradle.kts to special files for Kotlin project support, by Lucas Shadler.
|
||||
|
||||
### Aider v0.74.2
|
||||
|
||||
- Prevent more than one cache warming thread from becoming active.
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
Aider lets you pair program with LLMs,
|
||||
to edit code in your local git repository.
|
||||
Start a new project or work with an existing code base.
|
||||
Aider works best with Claude 3.5 Sonnet, DeepSeek R1 & Chat V3, OpenAI o1, o3-mini & GPT-4o. Aider can [connect to almost any LLM, including local models](https://aider.chat/docs/llms.html).
|
||||
Aider works best with Claude 3.7 Sonnet, DeepSeek R1 & Chat V3, OpenAI o1, o3-mini & GPT-4o. Aider can [connect to almost any LLM, including local models](https://aider.chat/docs/llms.html).
|
||||
|
||||
<!-- SCREENCAST START -->
|
||||
<p align="center">
|
||||
@@ -54,14 +54,14 @@ cd /to/your/project
|
||||
# Work with DeepSeek via DeepSeek's API
|
||||
aider --model deepseek --api-key deepseek=your-key-goes-here
|
||||
|
||||
# Work with Claude 3.5 Sonnet via Anthropic's API
|
||||
# Work with Claude 3.7 Sonnet via Anthropic's API
|
||||
aider --model sonnet --api-key anthropic=your-key-goes-here
|
||||
|
||||
# Work with GPT-4o via OpenAI's API
|
||||
aider --model gpt-4o --api-key openai=your-key-goes-here
|
||||
|
||||
# Work with Sonnet via OpenRouter's API
|
||||
aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here
|
||||
aider --model openrouter/anthropic/claude-3.7-sonnet --api-key openrouter=your-key-goes-here
|
||||
|
||||
# Work with DeepSeek via OpenRouter's API
|
||||
aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here
|
||||
@@ -95,7 +95,7 @@ Pair program with AI.
|
||||
- [Add images to the chat](https://aider.chat/docs/usage/images-urls.html) (GPT-4o, Claude 3.5 Sonnet, etc).
|
||||
- [Add URLs to the chat](https://aider.chat/docs/usage/images-urls.html) and aider will read their content.
|
||||
- [Code with your voice](https://aider.chat/docs/usage/voice.html).
|
||||
- Aider works best with Claude 3.5 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html).
|
||||
- Aider works best with Claude 3.7 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html).
|
||||
|
||||
|
||||
## Top tier performance
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
from packaging import version
|
||||
|
||||
__version__ = "0.74.3.dev"
|
||||
__version__ = "0.76.2.dev"
|
||||
safe_version = __version__
|
||||
|
||||
try:
|
||||
|
||||
121
aider/args.py
121
aider/args.py
@@ -12,6 +12,7 @@ from aider.args_formatter import (
|
||||
MarkdownHelpFormatter,
|
||||
YamlHelpFormatter,
|
||||
)
|
||||
from aider.deprecated import add_deprecated_model_args
|
||||
|
||||
from .dump import dump # noqa: F401
|
||||
|
||||
@@ -38,98 +39,6 @@ def get_parser(default_config_files, git_root):
|
||||
default=None,
|
||||
help="Specify the model to use for the main chat",
|
||||
)
|
||||
opus_model = "claude-3-opus-20240229"
|
||||
group.add_argument(
|
||||
"--opus",
|
||||
action="store_const",
|
||||
dest="model",
|
||||
const=opus_model,
|
||||
help=f"Use {opus_model} model for the main chat",
|
||||
)
|
||||
sonnet_model = "claude-3-5-sonnet-20241022"
|
||||
group.add_argument(
|
||||
"--sonnet",
|
||||
action="store_const",
|
||||
dest="model",
|
||||
const=sonnet_model,
|
||||
help=f"Use {sonnet_model} model for the main chat",
|
||||
)
|
||||
haiku_model = "claude-3-5-haiku-20241022"
|
||||
group.add_argument(
|
||||
"--haiku",
|
||||
action="store_const",
|
||||
dest="model",
|
||||
const=haiku_model,
|
||||
help=f"Use {haiku_model} model for the main chat",
|
||||
)
|
||||
gpt_4_model = "gpt-4-0613"
|
||||
group.add_argument(
|
||||
"--4",
|
||||
"-4",
|
||||
action="store_const",
|
||||
dest="model",
|
||||
const=gpt_4_model,
|
||||
help=f"Use {gpt_4_model} model for the main chat",
|
||||
)
|
||||
gpt_4o_model = "gpt-4o"
|
||||
group.add_argument(
|
||||
"--4o",
|
||||
action="store_const",
|
||||
dest="model",
|
||||
const=gpt_4o_model,
|
||||
help=f"Use {gpt_4o_model} model for the main chat",
|
||||
)
|
||||
gpt_4o_mini_model = "gpt-4o-mini"
|
||||
group.add_argument(
|
||||
"--mini",
|
||||
action="store_const",
|
||||
dest="model",
|
||||
const=gpt_4o_mini_model,
|
||||
help=f"Use {gpt_4o_mini_model} model for the main chat",
|
||||
)
|
||||
gpt_4_turbo_model = "gpt-4-1106-preview"
|
||||
group.add_argument(
|
||||
"--4-turbo",
|
||||
action="store_const",
|
||||
dest="model",
|
||||
const=gpt_4_turbo_model,
|
||||
help=f"Use {gpt_4_turbo_model} model for the main chat",
|
||||
)
|
||||
gpt_3_model_name = "gpt-3.5-turbo"
|
||||
group.add_argument(
|
||||
"--35turbo",
|
||||
"--35-turbo",
|
||||
"--3",
|
||||
"-3",
|
||||
action="store_const",
|
||||
dest="model",
|
||||
const=gpt_3_model_name,
|
||||
help=f"Use {gpt_3_model_name} model for the main chat",
|
||||
)
|
||||
deepseek_model = "deepseek/deepseek-chat"
|
||||
group.add_argument(
|
||||
"--deepseek",
|
||||
action="store_const",
|
||||
dest="model",
|
||||
const=deepseek_model,
|
||||
help=f"Use {deepseek_model} model for the main chat",
|
||||
)
|
||||
o1_mini_model = "o1-mini"
|
||||
group.add_argument(
|
||||
"--o1-mini",
|
||||
action="store_const",
|
||||
dest="model",
|
||||
const=o1_mini_model,
|
||||
help=f"Use {o1_mini_model} model for the main chat",
|
||||
)
|
||||
o1_preview_model = "o1-preview"
|
||||
group.add_argument(
|
||||
"--o1-preview",
|
||||
action="store_const",
|
||||
dest="model",
|
||||
const=o1_preview_model,
|
||||
help=f"Use {o1_preview_model} model for the main chat",
|
||||
)
|
||||
|
||||
##########
|
||||
group = parser.add_argument_group("API Keys and settings")
|
||||
@@ -208,6 +117,11 @@ def get_parser(default_config_files, git_root):
|
||||
type=str,
|
||||
help="Set the reasoning_effort API parameter (default: not set)",
|
||||
)
|
||||
group.add_argument(
|
||||
"--thinking-tokens",
|
||||
type=int,
|
||||
help="Set the thinking token budget for models that support it (default: not set)",
|
||||
)
|
||||
group.add_argument(
|
||||
"--verify-ssl",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
@@ -813,6 +727,24 @@ def get_parser(default_config_files, git_root):
|
||||
default=False,
|
||||
help="Enable/disable multi-line input mode with Meta-Enter to submit (default: False)",
|
||||
)
|
||||
group.add_argument(
|
||||
"--notifications",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=False,
|
||||
help=(
|
||||
"Enable/disable terminal bell notifications when LLM responses are ready (default:"
|
||||
" False)"
|
||||
),
|
||||
)
|
||||
group.add_argument(
|
||||
"--notifications-command",
|
||||
metavar="COMMAND",
|
||||
default=None,
|
||||
help=(
|
||||
"Specify a command to run for notifications instead of the terminal bell. If not"
|
||||
" specified, a default command for your OS may be used."
|
||||
),
|
||||
)
|
||||
group.add_argument(
|
||||
"--detect-urls",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
@@ -824,6 +756,11 @@ def get_parser(default_config_files, git_root):
|
||||
help="Specify which editor to use for the /editor command",
|
||||
)
|
||||
|
||||
##########
|
||||
group = parser.add_argument_group("Deprecated model settings")
|
||||
# Add deprecated model shortcut arguments
|
||||
add_deprecated_model_args(parser, group)
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
|
||||
@@ -148,11 +148,14 @@ class YamlHelpFormatter(argparse.HelpFormatter):
|
||||
parts.append(f"#{switch}: xxx")
|
||||
parts.append("## Specify multiple values like this:")
|
||||
parts.append(f"#{switch}:")
|
||||
parts.append(f"# - xxx")
|
||||
parts.append(f"# - yyy")
|
||||
parts.append(f"# - zzz")
|
||||
parts.append("# - xxx")
|
||||
parts.append("# - yyy")
|
||||
parts.append("# - zzz")
|
||||
else:
|
||||
parts.append(f"#{switch}: xxx\n")
|
||||
if switch.endswith("color"):
|
||||
parts.append(f'#{switch}: "xxx"\n')
|
||||
else:
|
||||
parts.append(f"#{switch}: xxx\n")
|
||||
|
||||
###
|
||||
# parts.append(str(action))
|
||||
|
||||
@@ -28,6 +28,12 @@ from aider.io import ConfirmGroup, InputOutput
|
||||
from aider.linter import Linter
|
||||
from aider.llm import litellm
|
||||
from aider.models import RETRY_TIMEOUT
|
||||
from aider.reasoning_tags import (
|
||||
REASONING_TAG,
|
||||
format_reasoning_content,
|
||||
remove_reasoning_content,
|
||||
replace_reasoning_tags,
|
||||
)
|
||||
from aider.repo import ANY_GIT_ERROR, GitRepo
|
||||
from aider.repomap import RepoMap
|
||||
from aider.run_cmd import run_cmd
|
||||
@@ -375,6 +381,10 @@ class Coder:
|
||||
self.pretty = self.io.pretty
|
||||
|
||||
self.main_model = main_model
|
||||
# Set the reasoning tag name based on model settings or default
|
||||
self.reasoning_tag_name = (
|
||||
self.main_model.reasoning_tag if self.main_model.reasoning_tag else REASONING_TAG
|
||||
)
|
||||
|
||||
self.stream = stream and main_model.streaming
|
||||
|
||||
@@ -1280,6 +1290,9 @@ class Coder:
|
||||
def send_message(self, inp):
|
||||
self.event("message_send_starting")
|
||||
|
||||
# Notify IO that LLM processing is starting
|
||||
self.io.llm_started()
|
||||
|
||||
self.cur_messages += [
|
||||
dict(role="user", content=inp),
|
||||
]
|
||||
@@ -1369,11 +1382,14 @@ class Coder:
|
||||
self.mdstream = None
|
||||
|
||||
self.partial_response_content = self.get_multi_response_content_in_progress(True)
|
||||
self.partial_response_content = self.main_model.remove_reasoning_content(
|
||||
self.partial_response_content
|
||||
)
|
||||
self.remove_reasoning_content()
|
||||
self.multi_response_content = ""
|
||||
|
||||
###
|
||||
# print()
|
||||
# print("=" * 20)
|
||||
# dump(self.partial_response_content)
|
||||
|
||||
self.io.tool_output()
|
||||
|
||||
self.show_usage_report()
|
||||
@@ -1623,6 +1639,9 @@ class Coder:
|
||||
return prompts.added_files.format(fnames=", ".join(added_fnames))
|
||||
|
||||
def send(self, messages, model=None, functions=None):
|
||||
self.got_reasoning_content = False
|
||||
self.ended_reasoning_content = False
|
||||
|
||||
if not model:
|
||||
model = self.main_model
|
||||
|
||||
@@ -1690,6 +1709,14 @@ class Coder:
|
||||
except AttributeError as func_err:
|
||||
show_func_err = func_err
|
||||
|
||||
try:
|
||||
reasoning_content = completion.choices[0].message.reasoning_content
|
||||
except AttributeError:
|
||||
try:
|
||||
reasoning_content = completion.choices[0].message.reasoning
|
||||
except AttributeError:
|
||||
reasoning_content = None
|
||||
|
||||
try:
|
||||
self.partial_response_content = completion.choices[0].message.content or ""
|
||||
except AttributeError as content_err:
|
||||
@@ -1708,6 +1735,15 @@ class Coder:
|
||||
raise Exception("No data found in LLM response!")
|
||||
|
||||
show_resp = self.render_incremental_response(True)
|
||||
|
||||
if reasoning_content:
|
||||
formatted_reasoning = format_reasoning_content(
|
||||
reasoning_content, self.reasoning_tag_name
|
||||
)
|
||||
show_resp = formatted_reasoning + show_resp
|
||||
|
||||
show_resp = replace_reasoning_tags(show_resp, self.reasoning_tag_name)
|
||||
|
||||
self.io.assistant_output(show_resp, pretty=self.show_pretty())
|
||||
|
||||
if (
|
||||
@@ -1717,6 +1753,8 @@ class Coder:
|
||||
raise FinishReasonLength()
|
||||
|
||||
def show_send_output_stream(self, completion):
|
||||
received_content = False
|
||||
|
||||
for chunk in completion:
|
||||
if len(chunk.choices) == 0:
|
||||
continue
|
||||
@@ -1735,19 +1773,46 @@ class Coder:
|
||||
self.partial_response_function_call[k] += v
|
||||
else:
|
||||
self.partial_response_function_call[k] = v
|
||||
received_content = True
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
text = ""
|
||||
|
||||
try:
|
||||
text = chunk.choices[0].delta.content
|
||||
if text:
|
||||
self.partial_response_content += text
|
||||
reasoning_content = chunk.choices[0].delta.reasoning_content
|
||||
except AttributeError:
|
||||
text = None
|
||||
try:
|
||||
reasoning_content = chunk.choices[0].delta.reasoning
|
||||
except AttributeError:
|
||||
reasoning_content = None
|
||||
|
||||
if reasoning_content:
|
||||
if not self.got_reasoning_content:
|
||||
text += f"<{REASONING_TAG}>\n\n"
|
||||
text += reasoning_content
|
||||
self.got_reasoning_content = True
|
||||
received_content = True
|
||||
|
||||
try:
|
||||
content = chunk.choices[0].delta.content
|
||||
if content:
|
||||
if self.got_reasoning_content and not self.ended_reasoning_content:
|
||||
text += f"\n\n</{self.reasoning_tag_name}>\n\n"
|
||||
self.ended_reasoning_content = True
|
||||
|
||||
text += content
|
||||
received_content = True
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
self.partial_response_content += text
|
||||
|
||||
if self.show_pretty():
|
||||
self.live_incremental_response(False)
|
||||
elif text:
|
||||
# Apply reasoning tag formatting
|
||||
text = replace_reasoning_tags(text, self.reasoning_tag_name)
|
||||
try:
|
||||
sys.stdout.write(text)
|
||||
except UnicodeEncodeError:
|
||||
@@ -1759,13 +1824,26 @@ class Coder:
|
||||
sys.stdout.flush()
|
||||
yield text
|
||||
|
||||
if not received_content:
|
||||
self.io.tool_warning("Empty response received from LLM. Check your provider account?")
|
||||
|
||||
def live_incremental_response(self, final):
|
||||
show_resp = self.render_incremental_response(final)
|
||||
# Apply any reasoning tag formatting
|
||||
show_resp = replace_reasoning_tags(show_resp, self.reasoning_tag_name)
|
||||
self.mdstream.update(show_resp, final=final)
|
||||
|
||||
def render_incremental_response(self, final):
|
||||
return self.get_multi_response_content_in_progress()
|
||||
|
||||
def remove_reasoning_content(self):
|
||||
"""Remove reasoning content from the model's response."""
|
||||
|
||||
self.partial_response_content = remove_reasoning_content(
|
||||
self.partial_response_content,
|
||||
self.reasoning_tag_name,
|
||||
)
|
||||
|
||||
def calculate_and_show_tokens_and_cost(self, messages, completion=None):
|
||||
prompt_tokens = 0
|
||||
completion_tokens = 0
|
||||
|
||||
@@ -404,6 +404,7 @@ class Commands:
|
||||
|
||||
fence = "`" * 3
|
||||
|
||||
file_res = []
|
||||
# files
|
||||
for fname in self.coder.abs_fnames:
|
||||
relative_fname = self.coder.get_rel_fname(fname)
|
||||
@@ -414,7 +415,7 @@ class Commands:
|
||||
# approximate
|
||||
content = f"{relative_fname}\n{fence}\n" + content + "{fence}\n"
|
||||
tokens = self.coder.main_model.token_count(content)
|
||||
res.append((tokens, f"{relative_fname}", "/drop to remove"))
|
||||
file_res.append((tokens, f"{relative_fname}", "/drop to remove"))
|
||||
|
||||
# read-only files
|
||||
for fname in self.coder.abs_read_only_fnames:
|
||||
@@ -424,7 +425,10 @@ class Commands:
|
||||
# approximate
|
||||
content = f"{relative_fname}\n{fence}\n" + content + "{fence}\n"
|
||||
tokens = self.coder.main_model.token_count(content)
|
||||
res.append((tokens, f"{relative_fname} (read-only)", "/drop to remove"))
|
||||
file_res.append((tokens, f"{relative_fname} (read-only)", "/drop to remove"))
|
||||
|
||||
file_res.sort()
|
||||
res.extend(file_res)
|
||||
|
||||
self.io.tool_output(
|
||||
f"Approximate context window usage for {self.coder.main_model.name}, in tokens:"
|
||||
|
||||
125
aider/deprecated.py
Normal file
125
aider/deprecated.py
Normal file
@@ -0,0 +1,125 @@
|
||||
def add_deprecated_model_args(parser, group):
|
||||
"""Add deprecated model shortcut arguments to the argparse parser."""
|
||||
opus_model = "claude-3-opus-20240229"
|
||||
group.add_argument(
|
||||
"--opus",
|
||||
action="store_true",
|
||||
help=f"Use {opus_model} model for the main chat (deprecated, use --model)",
|
||||
default=False,
|
||||
)
|
||||
sonnet_model = "anthropic/claude-3-7-sonnet-20250219"
|
||||
group.add_argument(
|
||||
"--sonnet",
|
||||
action="store_true",
|
||||
help=f"Use {sonnet_model} model for the main chat (deprecated, use --model)",
|
||||
default=False,
|
||||
)
|
||||
haiku_model = "claude-3-5-haiku-20241022"
|
||||
group.add_argument(
|
||||
"--haiku",
|
||||
action="store_true",
|
||||
help=f"Use {haiku_model} model for the main chat (deprecated, use --model)",
|
||||
default=False,
|
||||
)
|
||||
gpt_4_model = "gpt-4-0613"
|
||||
group.add_argument(
|
||||
"--4",
|
||||
"-4",
|
||||
action="store_true",
|
||||
help=f"Use {gpt_4_model} model for the main chat (deprecated, use --model)",
|
||||
default=False,
|
||||
)
|
||||
gpt_4o_model = "gpt-4o"
|
||||
group.add_argument(
|
||||
"--4o",
|
||||
action="store_true",
|
||||
help=f"Use {gpt_4o_model} model for the main chat (deprecated, use --model)",
|
||||
default=False,
|
||||
)
|
||||
gpt_4o_mini_model = "gpt-4o-mini"
|
||||
group.add_argument(
|
||||
"--mini",
|
||||
action="store_true",
|
||||
help=f"Use {gpt_4o_mini_model} model for the main chat (deprecated, use --model)",
|
||||
default=False,
|
||||
)
|
||||
gpt_4_turbo_model = "gpt-4-1106-preview"
|
||||
group.add_argument(
|
||||
"--4-turbo",
|
||||
action="store_true",
|
||||
help=f"Use {gpt_4_turbo_model} model for the main chat (deprecated, use --model)",
|
||||
default=False,
|
||||
)
|
||||
gpt_3_model_name = "gpt-3.5-turbo"
|
||||
group.add_argument(
|
||||
"--35turbo",
|
||||
"--35-turbo",
|
||||
"--3",
|
||||
"-3",
|
||||
action="store_true",
|
||||
help=f"Use {gpt_3_model_name} model for the main chat (deprecated, use --model)",
|
||||
default=False,
|
||||
)
|
||||
deepseek_model = "deepseek/deepseek-chat"
|
||||
group.add_argument(
|
||||
"--deepseek",
|
||||
action="store_true",
|
||||
help=f"Use {deepseek_model} model for the main chat (deprecated, use --model)",
|
||||
default=False,
|
||||
)
|
||||
o1_mini_model = "o1-mini"
|
||||
group.add_argument(
|
||||
"--o1-mini",
|
||||
action="store_true",
|
||||
help=f"Use {o1_mini_model} model for the main chat (deprecated, use --model)",
|
||||
default=False,
|
||||
)
|
||||
o1_preview_model = "o1-preview"
|
||||
group.add_argument(
|
||||
"--o1-preview",
|
||||
action="store_true",
|
||||
help=f"Use {o1_preview_model} model for the main chat (deprecated, use --model)",
|
||||
default=False,
|
||||
)
|
||||
|
||||
|
||||
def handle_deprecated_model_args(args, io):
|
||||
"""Handle deprecated model shortcut arguments and provide appropriate warnings."""
|
||||
# Define model mapping
|
||||
model_map = {
|
||||
"opus": "claude-3-opus-20240229",
|
||||
"sonnet": "anthropic/claude-3-7-sonnet-20250219",
|
||||
"haiku": "claude-3-5-haiku-20241022",
|
||||
"4": "gpt-4-0613",
|
||||
"4o": "gpt-4o",
|
||||
"mini": "gpt-4o-mini",
|
||||
"4_turbo": "gpt-4-1106-preview",
|
||||
"35turbo": "gpt-3.5-turbo",
|
||||
"deepseek": "deepseek/deepseek-chat",
|
||||
"o1_mini": "o1-mini",
|
||||
"o1_preview": "o1-preview",
|
||||
}
|
||||
|
||||
# Check if any deprecated args are used
|
||||
for arg_name, model_name in model_map.items():
|
||||
arg_name_clean = arg_name.replace("-", "_")
|
||||
if hasattr(args, arg_name_clean) and getattr(args, arg_name_clean):
|
||||
# Find preferred name to display in warning
|
||||
from aider.models import MODEL_ALIASES
|
||||
|
||||
display_name = model_name
|
||||
# Check if there's a shorter alias for this model
|
||||
for alias, full_name in MODEL_ALIASES.items():
|
||||
if full_name == model_name:
|
||||
display_name = alias
|
||||
break
|
||||
|
||||
# Show the warning
|
||||
io.tool_warning(
|
||||
f"The --{arg_name.replace('_', '-')} flag is deprecated and will be removed in a"
|
||||
f" future version. Please use --model {display_name} instead."
|
||||
)
|
||||
|
||||
# Set the model
|
||||
args.model = model_name
|
||||
break
|
||||
@@ -10,12 +10,13 @@ This module provides functionality to:
|
||||
|
||||
import os
|
||||
import platform
|
||||
import shlex
|
||||
import subprocess
|
||||
import tempfile
|
||||
|
||||
from rich.console import Console
|
||||
|
||||
from aider.dump import dump # noqa
|
||||
|
||||
DEFAULT_EDITOR_NIX = "vi"
|
||||
DEFAULT_EDITOR_OS_X = "vim"
|
||||
DEFAULT_EDITOR_WINDOWS = "notepad"
|
||||
@@ -87,13 +88,13 @@ def get_environment_editor(default=None):
|
||||
|
||||
def discover_editor(editor_override=None):
|
||||
"""
|
||||
Discovers and returns the appropriate editor command as a list of arguments.
|
||||
Discovers and returns the appropriate editor command.
|
||||
|
||||
Handles cases where the editor command includes arguments, including quoted arguments
|
||||
with spaces (e.g. 'vim -c "set noswapfile"').
|
||||
|
||||
:return: A list of command parts ready for subprocess execution
|
||||
:rtype: list[str]
|
||||
:return: The editor command as a string
|
||||
:rtype: str
|
||||
"""
|
||||
system = platform.system()
|
||||
if system == "Windows":
|
||||
@@ -102,14 +103,13 @@ def discover_editor(editor_override=None):
|
||||
default_editor = DEFAULT_EDITOR_OS_X
|
||||
else:
|
||||
default_editor = DEFAULT_EDITOR_NIX
|
||||
|
||||
if editor_override:
|
||||
editor = editor_override
|
||||
else:
|
||||
editor = get_environment_editor(default_editor)
|
||||
try:
|
||||
return shlex.split(editor)
|
||||
except ValueError as e:
|
||||
raise RuntimeError(f"Invalid editor command format '{editor}': {e}")
|
||||
|
||||
return editor
|
||||
|
||||
|
||||
def pipe_editor(input_data="", suffix=None, editor=None):
|
||||
@@ -128,9 +128,10 @@ def pipe_editor(input_data="", suffix=None, editor=None):
|
||||
:rtype: str
|
||||
"""
|
||||
filepath = write_temp_file(input_data, suffix)
|
||||
command_parts = discover_editor(editor)
|
||||
command_parts.append(filepath)
|
||||
subprocess.call(command_parts)
|
||||
command_str = discover_editor(editor)
|
||||
command_str += " " + filepath
|
||||
|
||||
subprocess.call(command_str, shell=True)
|
||||
with open(filepath, "r") as f:
|
||||
output_data = f.read()
|
||||
try:
|
||||
|
||||
164
aider/io.py
164
aider/io.py
@@ -1,7 +1,9 @@
|
||||
import base64
|
||||
import functools
|
||||
import os
|
||||
import shutil
|
||||
import signal
|
||||
import subprocess
|
||||
import time
|
||||
import webbrowser
|
||||
from collections import defaultdict
|
||||
@@ -34,6 +36,20 @@ from aider.mdstream import MarkdownStream
|
||||
from .dump import dump # noqa: F401
|
||||
from .utils import is_image_file
|
||||
|
||||
# Constants
|
||||
NOTIFICATION_MESSAGE = "Aider is waiting for your input"
|
||||
|
||||
|
||||
def ensure_hash_prefix(color):
|
||||
"""Ensure hex color values have a # prefix."""
|
||||
if not color:
|
||||
return color
|
||||
if isinstance(color, str) and color.strip() and not color.startswith("#"):
|
||||
# Check if it's a valid hex color (3 or 6 hex digits)
|
||||
if all(c in "0123456789ABCDEFabcdef" for c in color) and len(color) in (3, 6):
|
||||
return f"#{color}"
|
||||
return color
|
||||
|
||||
|
||||
def restore_multiline(func):
|
||||
"""Decorator to restore multiline mode after function execution"""
|
||||
@@ -196,6 +212,8 @@ class InputOutput:
|
||||
num_error_outputs = 0
|
||||
num_user_asks = 0
|
||||
clipboard_watcher = None
|
||||
bell_on_next_input = False
|
||||
notifications_command = None
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
@@ -224,25 +242,40 @@ class InputOutput:
|
||||
file_watcher=None,
|
||||
multiline_mode=False,
|
||||
root=".",
|
||||
notifications=False,
|
||||
notifications_command=None,
|
||||
):
|
||||
self.placeholder = None
|
||||
self.interrupted = False
|
||||
self.never_prompts = set()
|
||||
self.editingmode = editingmode
|
||||
self.multiline_mode = multiline_mode
|
||||
self.bell_on_next_input = False
|
||||
self.notifications = notifications
|
||||
if notifications and notifications_command is None:
|
||||
self.notifications_command = self.get_default_notification_command()
|
||||
else:
|
||||
self.notifications_command = notifications_command
|
||||
|
||||
no_color = os.environ.get("NO_COLOR")
|
||||
if no_color is not None and no_color != "":
|
||||
pretty = False
|
||||
|
||||
self.user_input_color = user_input_color if pretty else None
|
||||
self.tool_output_color = tool_output_color if pretty else None
|
||||
self.tool_error_color = tool_error_color if pretty else None
|
||||
self.tool_warning_color = tool_warning_color if pretty else None
|
||||
self.assistant_output_color = assistant_output_color
|
||||
self.completion_menu_color = completion_menu_color if pretty else None
|
||||
self.completion_menu_bg_color = completion_menu_bg_color if pretty else None
|
||||
self.completion_menu_current_color = completion_menu_current_color if pretty else None
|
||||
self.completion_menu_current_bg_color = completion_menu_current_bg_color if pretty else None
|
||||
self.user_input_color = ensure_hash_prefix(user_input_color) if pretty else None
|
||||
self.tool_output_color = ensure_hash_prefix(tool_output_color) if pretty else None
|
||||
self.tool_error_color = ensure_hash_prefix(tool_error_color) if pretty else None
|
||||
self.tool_warning_color = ensure_hash_prefix(tool_warning_color) if pretty else None
|
||||
self.assistant_output_color = ensure_hash_prefix(assistant_output_color)
|
||||
self.completion_menu_color = ensure_hash_prefix(completion_menu_color) if pretty else None
|
||||
self.completion_menu_bg_color = (
|
||||
ensure_hash_prefix(completion_menu_bg_color) if pretty else None
|
||||
)
|
||||
self.completion_menu_current_color = (
|
||||
ensure_hash_prefix(completion_menu_current_color) if pretty else None
|
||||
)
|
||||
self.completion_menu_current_bg_color = (
|
||||
ensure_hash_prefix(completion_menu_current_bg_color) if pretty else None
|
||||
)
|
||||
|
||||
self.code_theme = code_theme
|
||||
|
||||
@@ -444,6 +477,9 @@ class InputOutput:
|
||||
):
|
||||
self.rule()
|
||||
|
||||
# Ring the bell if needed
|
||||
self.ring_bell()
|
||||
|
||||
rel_fnames = list(rel_fnames)
|
||||
show = ""
|
||||
if rel_fnames:
|
||||
@@ -696,6 +732,9 @@ class InputOutput:
|
||||
):
|
||||
self.num_user_asks += 1
|
||||
|
||||
# Ring the bell if needed
|
||||
self.ring_bell()
|
||||
|
||||
question_id = (question, subject)
|
||||
|
||||
if question_id in self.never_prompts:
|
||||
@@ -750,14 +789,19 @@ class InputOutput:
|
||||
self.user_input(f"{question}{res}", log_only=False)
|
||||
else:
|
||||
while True:
|
||||
if self.prompt_session:
|
||||
res = self.prompt_session.prompt(
|
||||
question,
|
||||
style=style,
|
||||
complete_while_typing=False,
|
||||
)
|
||||
else:
|
||||
res = input(question)
|
||||
try:
|
||||
if self.prompt_session:
|
||||
res = self.prompt_session.prompt(
|
||||
question,
|
||||
style=style,
|
||||
complete_while_typing=False,
|
||||
)
|
||||
else:
|
||||
res = input(question)
|
||||
except EOFError:
|
||||
# Treat EOF (Ctrl+D) as if the user pressed Enter
|
||||
res = default
|
||||
break
|
||||
|
||||
if not res:
|
||||
res = default
|
||||
@@ -801,6 +845,9 @@ class InputOutput:
|
||||
def prompt_ask(self, question, default="", subject=None):
|
||||
self.num_user_asks += 1
|
||||
|
||||
# Ring the bell if needed
|
||||
self.ring_bell()
|
||||
|
||||
if subject:
|
||||
self.tool_output()
|
||||
self.tool_output(subject, bold=True)
|
||||
@@ -812,15 +859,19 @@ class InputOutput:
|
||||
elif self.yes is False:
|
||||
res = "no"
|
||||
else:
|
||||
if self.prompt_session:
|
||||
res = self.prompt_session.prompt(
|
||||
question + " ",
|
||||
default=default,
|
||||
style=style,
|
||||
complete_while_typing=True,
|
||||
)
|
||||
else:
|
||||
res = input(question + " ")
|
||||
try:
|
||||
if self.prompt_session:
|
||||
res = self.prompt_session.prompt(
|
||||
question + " ",
|
||||
default=default,
|
||||
style=style,
|
||||
complete_while_typing=True,
|
||||
)
|
||||
else:
|
||||
res = input(question + " ")
|
||||
except EOFError:
|
||||
# Treat EOF (Ctrl+D) as if the user pressed Enter
|
||||
res = default
|
||||
|
||||
hist = f"{question.strip()} {res.strip()}"
|
||||
self.append_chat_history(hist, linebreak=True, blockquote=True)
|
||||
@@ -882,6 +933,10 @@ class InputOutput:
|
||||
return mdStream
|
||||
|
||||
def assistant_output(self, message, pretty=None):
|
||||
if not message:
|
||||
self.tool_warning("Empty response received from LLM. Check your provider account?")
|
||||
return
|
||||
|
||||
show_resp = message
|
||||
|
||||
# Coder will force pretty off if fence is not triple-backticks
|
||||
@@ -893,7 +948,7 @@ class InputOutput:
|
||||
message, style=self.assistant_output_color, code_theme=self.code_theme
|
||||
)
|
||||
else:
|
||||
show_resp = Text(message or "<no response>")
|
||||
show_resp = Text(message or "(empty response)")
|
||||
|
||||
self.console.print(show_resp)
|
||||
|
||||
@@ -904,6 +959,61 @@ class InputOutput:
|
||||
def print(self, message=""):
|
||||
print(message)
|
||||
|
||||
def llm_started(self):
|
||||
"""Mark that the LLM has started processing, so we should ring the bell on next input"""
|
||||
self.bell_on_next_input = True
|
||||
|
||||
def get_default_notification_command(self):
|
||||
"""Return a default notification command based on the operating system."""
|
||||
import platform
|
||||
|
||||
system = platform.system()
|
||||
|
||||
if system == "Darwin": # macOS
|
||||
# Check for terminal-notifier first
|
||||
if shutil.which("terminal-notifier"):
|
||||
return f"terminal-notifier -title 'Aider' -message '{NOTIFICATION_MESSAGE}'"
|
||||
# Fall back to osascript
|
||||
return (
|
||||
f'osascript -e \'display notification "{NOTIFICATION_MESSAGE}" with title "Aider"\''
|
||||
)
|
||||
elif system == "Linux":
|
||||
# Check for common Linux notification tools
|
||||
for cmd in ["notify-send", "zenity"]:
|
||||
if shutil.which(cmd):
|
||||
if cmd == "notify-send":
|
||||
return f"notify-send 'Aider' '{NOTIFICATION_MESSAGE}'"
|
||||
elif cmd == "zenity":
|
||||
return f"zenity --notification --text='{NOTIFICATION_MESSAGE}'"
|
||||
return None # No known notification tool found
|
||||
elif system == "Windows":
|
||||
# PowerShell notification
|
||||
return (
|
||||
"powershell -command"
|
||||
" \"[System.Reflection.Assembly]::LoadWithPartialName('System.Windows.Forms');"
|
||||
f" [System.Windows.Forms.MessageBox]::Show('{NOTIFICATION_MESSAGE}',"
|
||||
" 'Aider')\""
|
||||
)
|
||||
|
||||
return None # Unknown system
|
||||
|
||||
def ring_bell(self):
|
||||
"""Ring the terminal bell if needed and clear the flag"""
|
||||
if self.bell_on_next_input and self.notifications:
|
||||
if self.notifications_command:
|
||||
try:
|
||||
result = subprocess.run(
|
||||
self.notifications_command, shell=True, capture_output=True
|
||||
)
|
||||
if result.returncode != 0 and result.stderr:
|
||||
error_msg = result.stderr.decode("utf-8", errors="replace")
|
||||
self.tool_warning(f"Failed to run notifications command: {error_msg}")
|
||||
except Exception as e:
|
||||
self.tool_warning(f"Failed to run notifications command: {e}")
|
||||
else:
|
||||
print("\a", end="", flush=True) # Ring the bell
|
||||
self.bell_on_next_input = False # Clear the flag
|
||||
|
||||
def toggle_multiline_mode(self):
|
||||
"""Toggle between normal and multiline input modes"""
|
||||
self.multiline_mode = not self.multiline_mode
|
||||
|
||||
@@ -8,7 +8,7 @@ from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
|
||||
from grep_ast import TreeContext, filename_to_lang
|
||||
from tree_sitter_languages import get_parser # noqa: E402
|
||||
from grep_ast.tsl import get_parser # noqa: E402
|
||||
|
||||
from aider.dump import dump # noqa: F401
|
||||
from aider.run_cmd import run_cmd_subprocess # noqa: F401
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import configparser
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
@@ -25,6 +24,7 @@ from aider.coders import Coder
|
||||
from aider.coders.base_coder import UnknownEditFormat
|
||||
from aider.commands import Commands, SwitchCoder
|
||||
from aider.copypaste import ClipboardWatcher
|
||||
from aider.deprecated import handle_deprecated_model_args
|
||||
from aider.format_settings import format_settings, scrub_sensitive_info
|
||||
from aider.history import ChatSummary
|
||||
from aider.io import InputOutput
|
||||
@@ -126,17 +126,8 @@ def setup_git(git_root, io):
|
||||
if not repo:
|
||||
return
|
||||
|
||||
user_name = None
|
||||
user_email = None
|
||||
with repo.config_reader() as config:
|
||||
try:
|
||||
user_name = config.get_value("user", "name", None)
|
||||
except (configparser.NoSectionError, configparser.NoOptionError):
|
||||
pass
|
||||
try:
|
||||
user_email = config.get_value("user", "email", None)
|
||||
except (configparser.NoSectionError, configparser.NoOptionError):
|
||||
pass
|
||||
user_name = repo.git.config("--default", "", "--get", "user.name") or None
|
||||
user_email = repo.git.config("--default", "", "--get", "user.email") or None
|
||||
|
||||
if user_name and user_email:
|
||||
return repo.working_tree_dir
|
||||
@@ -507,6 +498,8 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
litellm._load_litellm()
|
||||
litellm._lazy_module.client_session = httpx.Client(verify=False)
|
||||
litellm._lazy_module.aclient_session = httpx.AsyncClient(verify=False)
|
||||
# Set verify_ssl on the model_info_manager
|
||||
models.model_info_manager.set_verify_ssl(False)
|
||||
|
||||
if args.timeout:
|
||||
models.request_timeout = args.timeout
|
||||
@@ -555,6 +548,8 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
editingmode=editing_mode,
|
||||
fancy_input=args.fancy_input,
|
||||
multiline_mode=args.multiline,
|
||||
notifications=args.notifications,
|
||||
notifications_command=args.notifications_command,
|
||||
)
|
||||
|
||||
io = get_io(args.pretty)
|
||||
@@ -594,6 +589,9 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
|
||||
if args.openai_api_key:
|
||||
os.environ["OPENAI_API_KEY"] = args.openai_api_key
|
||||
|
||||
# Handle deprecated model shortcut args
|
||||
handle_deprecated_model_args(args, io)
|
||||
if args.openai_api_base:
|
||||
os.environ["OPENAI_API_BASE"] = args.openai_api_base
|
||||
if args.openai_api_version:
|
||||
@@ -751,7 +749,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
model_key_pairs = [
|
||||
("ANTHROPIC_API_KEY", "sonnet"),
|
||||
("DEEPSEEK_API_KEY", "deepseek"),
|
||||
("OPENROUTER_API_KEY", "openrouter/anthropic/claude-3.5-sonnet"),
|
||||
("OPENROUTER_API_KEY", "openrouter/anthropic/claude-3.7-sonnet"),
|
||||
("OPENAI_API_KEY", "gpt-4o"),
|
||||
("GEMINI_API_KEY", "flash"),
|
||||
]
|
||||
@@ -775,13 +773,19 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||
editor_edit_format=args.editor_edit_format,
|
||||
)
|
||||
|
||||
# add --reasoning-effort cli param
|
||||
# Check if deprecated remove_reasoning is set
|
||||
if main_model.remove_reasoning is not None:
|
||||
io.tool_warning(
|
||||
"Model setting 'remove_reasoning' is deprecated, please use 'reasoning_tag' instead."
|
||||
)
|
||||
|
||||
# Set reasoning effort if specified
|
||||
if args.reasoning_effort is not None:
|
||||
if not getattr(main_model, "extra_params", None):
|
||||
main_model.extra_params = {}
|
||||
if "extra_body" not in main_model.extra_params:
|
||||
main_model.extra_params["extra_body"] = {}
|
||||
main_model.extra_params["extra_body"]["reasoning_effort"] = args.reasoning_effort
|
||||
main_model.set_reasoning_effort(args.reasoning_effort)
|
||||
|
||||
# Set thinking tokens if specified
|
||||
if args.thinking_tokens is not None:
|
||||
main_model.set_thinking_tokens(args.thinking_tokens)
|
||||
|
||||
if args.copy_paste and args.edit_format is None:
|
||||
if main_model.edit_format in ("diff", "whole"):
|
||||
|
||||
@@ -5,7 +5,6 @@ import json
|
||||
import math
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
from dataclasses import dataclass, fields
|
||||
@@ -19,6 +18,7 @@ from PIL import Image
|
||||
from aider.dump import dump # noqa: F401
|
||||
from aider.llm import litellm
|
||||
from aider.sendchat import ensure_alternating_roles, sanity_check_messages
|
||||
from aider.utils import check_pip_install_extra
|
||||
|
||||
RETRY_TIMEOUT = 60
|
||||
|
||||
@@ -76,7 +76,7 @@ ANTHROPIC_MODELS = [ln.strip() for ln in ANTHROPIC_MODELS.splitlines() if ln.str
|
||||
# Mapping of model aliases to their canonical names
|
||||
MODEL_ALIASES = {
|
||||
# Claude models
|
||||
"sonnet": "claude-3-5-sonnet-20241022",
|
||||
"sonnet": "anthropic/claude-3-7-sonnet-20250219",
|
||||
"haiku": "claude-3-5-haiku-20241022",
|
||||
"opus": "claude-3-opus-20240229",
|
||||
# GPT models
|
||||
@@ -113,7 +113,8 @@ class ModelSettings:
|
||||
streaming: bool = True
|
||||
editor_model_name: Optional[str] = None
|
||||
editor_edit_format: Optional[str] = None
|
||||
remove_reasoning: Optional[str] = None
|
||||
reasoning_tag: Optional[str] = None
|
||||
remove_reasoning: Optional[str] = None # Deprecated alias for reasoning_tag
|
||||
system_prompt_prefix: Optional[str] = None
|
||||
|
||||
|
||||
@@ -137,9 +138,16 @@ class ModelInfoManager:
|
||||
self.cache_file = self.cache_dir / "model_prices_and_context_window.json"
|
||||
self.content = None
|
||||
self.local_model_metadata = {}
|
||||
self._load_cache()
|
||||
self.verify_ssl = True
|
||||
self._cache_loaded = False
|
||||
|
||||
def set_verify_ssl(self, verify_ssl):
|
||||
self.verify_ssl = verify_ssl
|
||||
|
||||
def _load_cache(self):
|
||||
if self._cache_loaded:
|
||||
return
|
||||
|
||||
try:
|
||||
self.cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
if self.cache_file.exists():
|
||||
@@ -149,11 +157,14 @@ class ModelInfoManager:
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
self._cache_loaded = True
|
||||
|
||||
def _update_cache(self):
|
||||
try:
|
||||
import requests
|
||||
|
||||
response = requests.get(self.MODEL_INFO_URL, timeout=5)
|
||||
# Respect the --no-verify-ssl switch
|
||||
response = requests.get(self.MODEL_INFO_URL, timeout=5, verify=self.verify_ssl)
|
||||
if response.status_code == 200:
|
||||
self.content = response.json()
|
||||
try:
|
||||
@@ -173,6 +184,9 @@ class ModelInfoManager:
|
||||
if data:
|
||||
return data
|
||||
|
||||
# Ensure cache is loaded before checking content
|
||||
self._load_cache()
|
||||
|
||||
if not self.content:
|
||||
self._update_cache()
|
||||
|
||||
@@ -259,6 +273,11 @@ class Model(ModelSettings):
|
||||
val = getattr(source, field.name)
|
||||
setattr(self, field.name, val)
|
||||
|
||||
# Handle backward compatibility: if remove_reasoning is set but reasoning_tag isn't,
|
||||
# use remove_reasoning's value for reasoning_tag
|
||||
if self.reasoning_tag is None and self.remove_reasoning is not None:
|
||||
self.reasoning_tag = self.remove_reasoning
|
||||
|
||||
def configure_model_settings(self, model):
|
||||
# Look for exact model match
|
||||
exact_match = False
|
||||
@@ -331,7 +350,8 @@ class Model(ModelSettings):
|
||||
self.use_repo_map = True
|
||||
self.examples_as_sys_msg = True
|
||||
self.use_temperature = False
|
||||
self.remove_reasoning = "think"
|
||||
self.reasoning_tag = "think"
|
||||
self.reasoning_tag = "think"
|
||||
return # <--
|
||||
|
||||
if ("llama3" in model or "llama-3" in model) and "70b" in model:
|
||||
@@ -380,6 +400,16 @@ class Model(ModelSettings):
|
||||
self.use_repo_map = True
|
||||
return # <--
|
||||
|
||||
if "qwq" in model and "32b" in model and "preview" not in model:
|
||||
self.edit_format = "diff"
|
||||
self.editor_edit_format = "editor-diff"
|
||||
self.use_repo_map = True
|
||||
self.reasoning_tag = "think"
|
||||
self.examples_as_sys_msg = True
|
||||
self.use_temperature = 0.6
|
||||
self.extra_params = dict(top_p=0.95)
|
||||
return # <--
|
||||
|
||||
# use the defaults
|
||||
if self.edit_format == "diff":
|
||||
self.use_repo_map = True
|
||||
@@ -559,6 +589,23 @@ class Model(ModelSettings):
|
||||
map_tokens = max(map_tokens, 1024)
|
||||
return map_tokens
|
||||
|
||||
def set_reasoning_effort(self, effort):
|
||||
"""Set the reasoning effort parameter for models that support it"""
|
||||
if effort is not None:
|
||||
if not self.extra_params:
|
||||
self.extra_params = {}
|
||||
if "extra_body" not in self.extra_params:
|
||||
self.extra_params["extra_body"] = {}
|
||||
self.extra_params["extra_body"]["reasoning_effort"] = effort
|
||||
|
||||
def set_thinking_tokens(self, num):
|
||||
"""Set the thinking token budget for models that support it"""
|
||||
if num is not None:
|
||||
self.use_temperature = False
|
||||
if not self.extra_params:
|
||||
self.extra_params = {}
|
||||
self.extra_params["thinking"] = {"type": "enabled", "budget_tokens": num}
|
||||
|
||||
def is_deepseek_r1(self):
|
||||
name = self.name.lower()
|
||||
if "deepseek" not in name:
|
||||
@@ -609,14 +656,6 @@ class Model(ModelSettings):
|
||||
res = litellm.completion(**kwargs)
|
||||
return hash_object, res
|
||||
|
||||
def remove_reasoning_content(self, res):
|
||||
if not self.remove_reasoning:
|
||||
return res
|
||||
|
||||
pattern = f"<{self.remove_reasoning}>.*?</{self.remove_reasoning}>"
|
||||
res = re.sub(pattern, "", res, flags=re.DOTALL).strip()
|
||||
return res
|
||||
|
||||
def simple_send_with_retries(self, messages):
|
||||
from aider.exceptions import LiteLLMExceptions
|
||||
|
||||
@@ -637,7 +676,9 @@ class Model(ModelSettings):
|
||||
if not response or not hasattr(response, "choices") or not response.choices:
|
||||
return None
|
||||
res = response.choices[0].message.content
|
||||
return self.remove_reasoning_content(res)
|
||||
from aider.reasoning_tags import remove_reasoning_content
|
||||
|
||||
return remove_reasoning_content(res, self.reasoning_tag)
|
||||
|
||||
except litellm_ex.exceptions_tuple() as err:
|
||||
ex_info = litellm_ex.get_ex_info(err)
|
||||
@@ -760,6 +801,9 @@ def sanity_check_model(io, model):
|
||||
show = True
|
||||
io.tool_warning(f"Warning for {model}: Unknown which environment variables are required.")
|
||||
|
||||
# Check for model-specific dependencies
|
||||
check_for_dependencies(io, model.name)
|
||||
|
||||
if not model.info:
|
||||
show = True
|
||||
io.tool_warning(
|
||||
@@ -775,6 +819,30 @@ def sanity_check_model(io, model):
|
||||
return show
|
||||
|
||||
|
||||
def check_for_dependencies(io, model_name):
|
||||
"""
|
||||
Check for model-specific dependencies and install them if needed.
|
||||
|
||||
Args:
|
||||
io: The IO object for user interaction
|
||||
model_name: The name of the model to check dependencies for
|
||||
"""
|
||||
# Check if this is a Bedrock model and ensure boto3 is installed
|
||||
if model_name.startswith("bedrock/"):
|
||||
check_pip_install_extra(
|
||||
io, "boto3", "AWS Bedrock models require the boto3 package.", ["boto3"]
|
||||
)
|
||||
|
||||
# Check if this is a Vertex AI model and ensure google-cloud-aiplatform is installed
|
||||
elif model_name.startswith("vertex_ai/"):
|
||||
check_pip_install_extra(
|
||||
io,
|
||||
"google.cloud.aiplatform",
|
||||
"Google Vertex AI models require the google-cloud-aiplatform package.",
|
||||
["google-cloud-aiplatform"],
|
||||
)
|
||||
|
||||
|
||||
def fuzzy_match_models(name):
|
||||
name = name.lower()
|
||||
|
||||
|
||||
26
aider/queries/tree-sitter-language-pack/csharp-tags.scm
Normal file
26
aider/queries/tree-sitter-language-pack/csharp-tags.scm
Normal file
@@ -0,0 +1,26 @@
|
||||
; Based on https://github.com/tree-sitter/tree-sitter-c-sharp/blob/master/queries/tags.scm
|
||||
; MIT License.
|
||||
|
||||
(class_declaration name: (identifier) @name.definition.class) @definition.class
|
||||
|
||||
(class_declaration (base_list (_) @name.reference.class)) @reference.class
|
||||
|
||||
(interface_declaration name: (identifier) @name.definition.interface) @definition.interface
|
||||
|
||||
(interface_declaration (base_list (_) @name.reference.interface)) @reference.interface
|
||||
|
||||
(method_declaration name: (identifier) @name.definition.method) @definition.method
|
||||
|
||||
(object_creation_expression type: (identifier) @name.reference.class) @reference.class
|
||||
|
||||
(type_parameter_constraints_clause (identifier) @name.reference.class) @reference.class
|
||||
|
||||
(type_parameter_constraint (type type: (identifier) @name.reference.class)) @reference.class
|
||||
|
||||
(variable_declaration type: (identifier) @name.reference.class) @reference.class
|
||||
|
||||
(invocation_expression function: (member_access_expression name: (identifier) @name.reference.send)) @reference.send
|
||||
|
||||
(namespace_declaration name: (identifier) @name.definition.module) @definition.module
|
||||
|
||||
(namespace_declaration name: (identifier) @name.definition.module) @module
|
||||
88
aider/queries/tree-sitter-language-pack/javascript-tags.scm
Normal file
88
aider/queries/tree-sitter-language-pack/javascript-tags.scm
Normal file
@@ -0,0 +1,88 @@
|
||||
(
|
||||
(comment)* @doc
|
||||
.
|
||||
(method_definition
|
||||
name: (property_identifier) @name.definition.method) @definition.method
|
||||
(#not-eq? @name.definition.method "constructor")
|
||||
(#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$")
|
||||
(#select-adjacent! @doc @definition.method)
|
||||
)
|
||||
|
||||
(
|
||||
(comment)* @doc
|
||||
.
|
||||
[
|
||||
(class
|
||||
name: (_) @name.definition.class)
|
||||
(class_declaration
|
||||
name: (_) @name.definition.class)
|
||||
] @definition.class
|
||||
(#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$")
|
||||
(#select-adjacent! @doc @definition.class)
|
||||
)
|
||||
|
||||
(
|
||||
(comment)* @doc
|
||||
.
|
||||
[
|
||||
(function_expression
|
||||
name: (identifier) @name.definition.function)
|
||||
(function_declaration
|
||||
name: (identifier) @name.definition.function)
|
||||
(generator_function
|
||||
name: (identifier) @name.definition.function)
|
||||
(generator_function_declaration
|
||||
name: (identifier) @name.definition.function)
|
||||
] @definition.function
|
||||
(#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$")
|
||||
(#select-adjacent! @doc @definition.function)
|
||||
)
|
||||
|
||||
(
|
||||
(comment)* @doc
|
||||
.
|
||||
(lexical_declaration
|
||||
(variable_declarator
|
||||
name: (identifier) @name.definition.function
|
||||
value: [(arrow_function) (function_expression)]) @definition.function)
|
||||
(#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$")
|
||||
(#select-adjacent! @doc @definition.function)
|
||||
)
|
||||
|
||||
(
|
||||
(comment)* @doc
|
||||
.
|
||||
(variable_declaration
|
||||
(variable_declarator
|
||||
name: (identifier) @name.definition.function
|
||||
value: [(arrow_function) (function_expression)]) @definition.function)
|
||||
(#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$")
|
||||
(#select-adjacent! @doc @definition.function)
|
||||
)
|
||||
|
||||
(assignment_expression
|
||||
left: [
|
||||
(identifier) @name.definition.function
|
||||
(member_expression
|
||||
property: (property_identifier) @name.definition.function)
|
||||
]
|
||||
right: [(arrow_function) (function_expression)]
|
||||
) @definition.function
|
||||
|
||||
(pair
|
||||
key: (property_identifier) @name.definition.function
|
||||
value: [(arrow_function) (function_expression)]) @definition.function
|
||||
|
||||
(
|
||||
(call_expression
|
||||
function: (identifier) @name.reference.call) @reference.call
|
||||
(#not-match? @name.reference.call "^(require)$")
|
||||
)
|
||||
|
||||
(call_expression
|
||||
function: (member_expression
|
||||
property: (property_identifier) @name.reference.call)
|
||||
arguments: (_) @reference.call)
|
||||
|
||||
(new_expression
|
||||
constructor: (_) @name.reference.class) @reference.class
|
||||
82
aider/reasoning_tags.py
Normal file
82
aider/reasoning_tags.py
Normal file
@@ -0,0 +1,82 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import re
|
||||
|
||||
from aider.dump import dump # noqa
|
||||
|
||||
# Standard tag identifier
|
||||
REASONING_TAG = "thinking-content-" + "7bbeb8e1441453ad999a0bbba8a46d4b"
|
||||
# Output formatting
|
||||
REASONING_START = "--------------\n► **THINKING**"
|
||||
REASONING_END = "------------\n► **ANSWER**"
|
||||
|
||||
|
||||
def remove_reasoning_content(res, reasoning_tag):
|
||||
"""
|
||||
Remove reasoning content from text based on tags.
|
||||
|
||||
Args:
|
||||
res (str): The text to process
|
||||
reasoning_tag (str): The tag name to remove
|
||||
|
||||
Returns:
|
||||
str: Text with reasoning content removed
|
||||
"""
|
||||
if not reasoning_tag:
|
||||
return res
|
||||
|
||||
# Try to match the complete tag pattern first
|
||||
pattern = f"<{reasoning_tag}>.*?</{reasoning_tag}>"
|
||||
res = re.sub(pattern, "", res, flags=re.DOTALL).strip()
|
||||
|
||||
# If closing tag exists but opening tag might be missing, remove everything before closing
|
||||
# tag
|
||||
closing_tag = f"</{reasoning_tag}>"
|
||||
if closing_tag in res:
|
||||
# Split on the closing tag and keep everything after it
|
||||
parts = res.split(closing_tag, 1)
|
||||
res = parts[1].strip() if len(parts) > 1 else res
|
||||
|
||||
return res
|
||||
|
||||
|
||||
def replace_reasoning_tags(text, tag_name):
|
||||
"""
|
||||
Replace opening and closing reasoning tags with standard formatting.
|
||||
Ensures exactly one blank line before START and END markers.
|
||||
|
||||
Args:
|
||||
text (str): The text containing the tags
|
||||
tag_name (str): The name of the tag to replace
|
||||
|
||||
Returns:
|
||||
str: Text with reasoning tags replaced with standard format
|
||||
"""
|
||||
if not text:
|
||||
return text
|
||||
|
||||
# Replace opening tag with proper spacing
|
||||
text = re.sub(f"\\s*<{tag_name}>\\s*", f"\n{REASONING_START}\n\n", text)
|
||||
|
||||
# Replace closing tag with proper spacing
|
||||
text = re.sub(f"\\s*</{tag_name}>\\s*", f"\n\n{REASONING_END}\n\n", text)
|
||||
|
||||
return text
|
||||
|
||||
|
||||
def format_reasoning_content(reasoning_content, tag_name):
|
||||
"""
|
||||
Format reasoning content with appropriate tags.
|
||||
|
||||
Args:
|
||||
reasoning_content (str): The content to format
|
||||
tag_name (str): The tag name to use
|
||||
|
||||
Returns:
|
||||
str: Formatted reasoning content with tags
|
||||
"""
|
||||
if not reasoning_content:
|
||||
return ""
|
||||
|
||||
formatted = f"<{tag_name}>\n\n{reasoning_content}\n\n</{tag_name}>"
|
||||
return formatted
|
||||
@@ -145,7 +145,7 @@ class GitRepo:
|
||||
else:
|
||||
cmd += ["-a"]
|
||||
|
||||
original_user_name = self.repo.config_reader().get_value("user", "name")
|
||||
original_user_name = self.repo.git.config("--get", "user.name")
|
||||
original_committer_name_env = os.environ.get("GIT_COMMITTER_NAME")
|
||||
committer_name = f"{original_user_name} (aider)"
|
||||
|
||||
@@ -309,8 +309,11 @@ class GitRepo:
|
||||
|
||||
# Add staged files
|
||||
index = self.repo.index
|
||||
staged_files = [path for path, _ in index.entries.keys()]
|
||||
files.update(self.normalize_path(path) for path in staged_files)
|
||||
try:
|
||||
staged_files = [path for path, _ in index.entries.keys()]
|
||||
files.update(self.normalize_path(path) for path in staged_files)
|
||||
except ANY_GIT_ERROR as err:
|
||||
self.io.tool_error(f"Unable to read staged files: {err}")
|
||||
|
||||
res = [fname for fname in files if not self.ignored_file(fname)]
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ from aider.utils import Spinner
|
||||
|
||||
# tree_sitter is throwing a FutureWarning
|
||||
warnings.simplefilter("ignore", category=FutureWarning)
|
||||
from tree_sitter_languages import get_language, get_parser # noqa: E402
|
||||
from grep_ast.tsl import USING_TSL_PACK, get_language, get_parser # noqa: E402
|
||||
|
||||
Tag = namedtuple("Tag", "rel_fname fname line name kind".split())
|
||||
|
||||
@@ -31,8 +31,12 @@ Tag = namedtuple("Tag", "rel_fname fname line name kind".split())
|
||||
SQLITE_ERRORS = (sqlite3.OperationalError, sqlite3.DatabaseError, OSError)
|
||||
|
||||
|
||||
CACHE_VERSION = 3
|
||||
if USING_TSL_PACK:
|
||||
CACHE_VERSION = 4
|
||||
|
||||
|
||||
class RepoMap:
|
||||
CACHE_VERSION = 3
|
||||
TAGS_CACHE_DIR = f".aider.tags.cache.v{CACHE_VERSION}"
|
||||
|
||||
warned_files = set()
|
||||
@@ -282,10 +286,15 @@ class RepoMap:
|
||||
query = language.query(query_scm)
|
||||
captures = query.captures(tree.root_node)
|
||||
|
||||
captures = list(captures)
|
||||
|
||||
saw = set()
|
||||
for node, tag in captures:
|
||||
if USING_TSL_PACK:
|
||||
all_nodes = []
|
||||
for tag, nodes in captures.items():
|
||||
all_nodes += [(node, tag) for node in nodes]
|
||||
else:
|
||||
all_nodes = list(captures)
|
||||
|
||||
for node, tag in all_nodes:
|
||||
if tag.startswith("name.definition."):
|
||||
kind = "def"
|
||||
elif tag.startswith("name.reference."):
|
||||
@@ -422,6 +431,15 @@ class RepoMap:
|
||||
|
||||
G = nx.MultiDiGraph()
|
||||
|
||||
# Add a small self-edge for every definition that has no references
|
||||
# Helps with tree-sitter 0.23.2 with ruby, where "def greet(name)"
|
||||
# isn't counted as a def AND a ref. tree-sitter 0.24.0 does.
|
||||
for ident in defines.keys():
|
||||
if ident in references:
|
||||
continue
|
||||
for definer in defines[ident]:
|
||||
G.add_edge(definer, definer, weight=0.1, ident=ident)
|
||||
|
||||
for ident in idents:
|
||||
if progress:
|
||||
progress()
|
||||
@@ -732,8 +750,27 @@ def get_random_color():
|
||||
|
||||
def get_scm_fname(lang):
|
||||
# Load the tags queries
|
||||
if USING_TSL_PACK:
|
||||
subdir = "tree-sitter-language-pack"
|
||||
try:
|
||||
path = resources.files(__package__).joinpath(
|
||||
"queries",
|
||||
subdir,
|
||||
f"{lang}-tags.scm",
|
||||
)
|
||||
if path.exists():
|
||||
return path
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
# Fall back to tree-sitter-languages
|
||||
subdir = "tree-sitter-languages"
|
||||
try:
|
||||
return resources.files(__package__).joinpath("queries", f"tree-sitter-{lang}-tags.scm")
|
||||
return resources.files(__package__).joinpath(
|
||||
"queries",
|
||||
subdir,
|
||||
f"{lang}-tags.scm",
|
||||
)
|
||||
except KeyError:
|
||||
return
|
||||
|
||||
|
||||
@@ -47,6 +47,22 @@
|
||||
//"supports_tool_choice": true,
|
||||
"supports_prompt_caching": true
|
||||
},
|
||||
"openrouter/deepseek/deepseek-chat:free": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 64000,
|
||||
"max_output_tokens": 8192,
|
||||
"input_cost_per_token": 0.0,
|
||||
"input_cost_per_token_cache_hit": 0.0,
|
||||
"cache_read_input_token_cost": 0.00,
|
||||
"cache_creation_input_token_cost": 0.0,
|
||||
"output_cost_per_token": 0.0,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
//"supports_function_calling": true,
|
||||
"supports_assistant_prefill": true,
|
||||
//"supports_tool_choice": true,
|
||||
"supports_prompt_caching": true
|
||||
},
|
||||
"fireworks_ai/accounts/fireworks/models/deepseek-r1": {
|
||||
"max_tokens": 160000,
|
||||
"max_input_tokens": 128000,
|
||||
@@ -97,6 +113,22 @@
|
||||
"supports_system_messages": true,
|
||||
"supports_response_schema": true
|
||||
},
|
||||
"openrouter/openai/o3-mini-high": {
|
||||
"max_tokens": 100000,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 100000,
|
||||
"input_cost_per_token": 0.0000011,
|
||||
"output_cost_per_token": 0.0000044,
|
||||
"cache_read_input_token_cost": 0.00000055,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_parallel_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_response_schema": true
|
||||
},
|
||||
"openrouter/openai/gpt-4o-mini": {
|
||||
"max_tokens": 16384,
|
||||
"max_input_tokens": 128000,
|
||||
@@ -115,4 +147,98 @@
|
||||
"supports_prompt_caching": true,
|
||||
"supports_system_messages": true
|
||||
},
|
||||
"claude-3-7-sonnet-20250219": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 8192,
|
||||
"input_cost_per_token": 0.000003,
|
||||
"output_cost_per_token": 0.000015,
|
||||
"cache_creation_input_token_cost": 0.00000375,
|
||||
"cache_read_input_token_cost": 0.0000003,
|
||||
"litellm_provider": "anthropic",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"tool_use_system_prompt_tokens": 159,
|
||||
"supports_assistant_prefill": true,
|
||||
"supports_pdf_input": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_response_schema": true,
|
||||
"deprecation_date": "2025-10-01",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"anthropic/claude-3-7-sonnet-20250219": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 8192,
|
||||
"input_cost_per_token": 0.000003,
|
||||
"output_cost_per_token": 0.000015,
|
||||
"cache_creation_input_token_cost": 0.00000375,
|
||||
"cache_read_input_token_cost": 0.0000003,
|
||||
"litellm_provider": "anthropic",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"tool_use_system_prompt_tokens": 159,
|
||||
"supports_assistant_prefill": true,
|
||||
"supports_pdf_input": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_response_schema": true,
|
||||
"deprecation_date": "2025-10-01",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"openrouter/anthropic/claude-3.7-sonnet": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 200000,
|
||||
"max_output_tokens": 8192,
|
||||
"input_cost_per_token": 0.000003,
|
||||
"output_cost_per_token": 0.000015,
|
||||
"cache_creation_input_token_cost": 0.00000375,
|
||||
"cache_read_input_token_cost": 0.0000003,
|
||||
"litellm_provider": "openrouter",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"tool_use_system_prompt_tokens": 159,
|
||||
"supports_assistant_prefill": true,
|
||||
"supports_pdf_input": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_response_schema": true,
|
||||
"deprecation_date": "2025-10-01",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"gpt-4.5-preview": {
|
||||
"max_tokens": 16384,
|
||||
"max_input_tokens": 128000,
|
||||
"max_output_tokens": 16384,
|
||||
"input_cost_per_token": 0.000075,
|
||||
"output_cost_per_token": 0.00015,
|
||||
"cache_read_input_token_cost": 0.0000375,
|
||||
"litellm_provider": "openai",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_parallel_function_calling": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_vision": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"openai/gpt-4.5-preview": {
|
||||
"max_tokens": 16384,
|
||||
"max_input_tokens": 128000,
|
||||
"max_output_tokens": 16384,
|
||||
"input_cost_per_token": 0.000075,
|
||||
"output_cost_per_token": 0.00015,
|
||||
"cache_read_input_token_cost": 0.0000375,
|
||||
"litellm_provider": "openai",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": true,
|
||||
"supports_parallel_function_calling": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_vision": true,
|
||||
"supports_prompt_caching": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
}
|
||||
|
||||
@@ -184,6 +184,156 @@
|
||||
editor_model_name: anthropic/claude-3-5-sonnet-20241022
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: anthropic/claude-3-7-sonnet-20250219
|
||||
edit_format: diff
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: anthropic/claude-3-7-sonnet-20250219
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: anthropic/claude-3-7-sonnet-latest
|
||||
edit_format: diff
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: anthropic/claude-3-7-sonnet-latest
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: claude-3-7-sonnet-20250219
|
||||
edit_format: diff
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: claude-3-7-sonnet-20250219
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: claude-3-7-sonnet-latest
|
||||
edit_format: diff
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: claude-3-7-sonnet-latest
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: bedrock_converse/anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock_converse/anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: bedrock_converse/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock_converse/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: vertex_ai/claude-3-7-sonnet@20250219
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
max_tokens: 64000
|
||||
editor_model_name: vertex_ai/claude-3-7-sonnet@20250219
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: vertex_ai-anthropic_models/vertex_ai/claude-3-7-sonnet@20250219
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
max_tokens: 64000
|
||||
editor_model_name: vertex_ai-anthropic_models/vertex_ai/claude-3-7-sonnet@20250219
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: openrouter/anthropic/claude-3.7-sonnet
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/anthropic/claude-3-5-haiku
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: openrouter/anthropic/claude-3.7-sonnet
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: openrouter/anthropic/claude-3.7-sonnet:beta
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/anthropic/claude-3-5-haiku
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: openrouter/anthropic/claude-3.7-sonnet
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
@@ -397,8 +547,8 @@
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
max_tokens: 8192
|
||||
include_reasoning: true
|
||||
caches_by_default: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/deepseek/deepseek-chat
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
@@ -435,6 +585,18 @@
|
||||
max_tokens: 8192
|
||||
caches_by_default: true
|
||||
|
||||
- name: openrouter/deepseek/deepseek-chat:free
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/deepseek/deepseek-chat:free
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
max_tokens: 8192
|
||||
caches_by_default: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/deepseek/deepseek-chat:free
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: deepseek/deepseek-coder
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
@@ -618,7 +780,7 @@
|
||||
streaming: true
|
||||
editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
|
||||
editor_edit_format: editor-diff
|
||||
remove_reasoning: think
|
||||
reasoning_tag: think
|
||||
extra_params:
|
||||
max_tokens: 160000
|
||||
|
||||
@@ -657,6 +819,15 @@
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
|
||||
- name: openrouter/openai/o3-mini-high
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4o-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
|
||||
- name: azure/o3-mini
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4o-mini
|
||||
@@ -666,4 +837,49 @@
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: "Formatting re-enabled. "
|
||||
|
||||
|
||||
- name: gpt-4.5-preview
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
use_repo_map: true
|
||||
lazy: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: true
|
||||
editor_model_name: gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: openai/gpt-4.5-preview
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
use_repo_map: true
|
||||
lazy: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: true
|
||||
editor_model_name: openai/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: fireworks_ai/accounts/fireworks/models/qwq-32b
|
||||
reasoning_tag: think
|
||||
edit_format: diff
|
||||
weak_model_name: fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct
|
||||
use_repo_map: true
|
||||
editor_model_name: fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct
|
||||
editor_edit_format: editor-diff
|
||||
reminder: user
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: 0.6
|
||||
extra_params:
|
||||
max_tokens: 32000
|
||||
top_p: 0.95
|
||||
|
||||
- name: groq/qwen-qwq-32b
|
||||
reasoning_tag: think
|
||||
edit_format: diff
|
||||
weak_model_name: groq/qwen-2.5-coder-32b
|
||||
use_repo_map: true
|
||||
editor_model_name: groq/qwen-2.5-coder-32b
|
||||
editor_edit_format: editor-diff
|
||||
use_temperature: 0.6
|
||||
extra_params:
|
||||
max_tokens: 128000
|
||||
top_p: 0.95
|
||||
|
||||
|
||||
@@ -41,6 +41,7 @@ ROOT_IMPORTANT_FILES = [
|
||||
"composer.lock",
|
||||
"pom.xml",
|
||||
"build.gradle",
|
||||
"build.gradle.kts",
|
||||
"build.sbt",
|
||||
"go.mod",
|
||||
"go.sum",
|
||||
|
||||
@@ -140,7 +140,10 @@ class FileWatcher:
|
||||
roots_to_watch = self.get_roots_to_watch()
|
||||
|
||||
for changes in watch(
|
||||
*roots_to_watch, watch_filter=self.filter_func, stop_event=self.stop_event
|
||||
*roots_to_watch,
|
||||
watch_filter=self.filter_func,
|
||||
stop_event=self.stop_event,
|
||||
ignore_permission_denied=True,
|
||||
):
|
||||
if self.handle_changes(changes):
|
||||
return
|
||||
|
||||
@@ -23,6 +23,64 @@ cog.out(text)
|
||||
]]]-->
|
||||
|
||||
|
||||
### Aider v0.76.1
|
||||
|
||||
- Added ignore_permission_denied option to file watcher to prevent errors when accessing restricted files, by Yutaka Matsubara.
|
||||
- Aider wrote 0% of the code in this release.
|
||||
|
||||
### Aider v0.76.0
|
||||
|
||||
- Improved support for thinking/reasoningmodels:
|
||||
- Added `--thinking-tokens` CLI option to control token budget for models that support thinking.
|
||||
- Display thinking/reasoning content from LLMs which return it.
|
||||
- Enhanced handling of reasoning tags to better clean up model responses.
|
||||
- Added deprecation warning for `remove_reasoning` setting, now replaced by `reasoning_tag`.
|
||||
- Aider will notify you when it's completed the last request and needs your input:
|
||||
- Added [notifications when LLM responses are ready](https://aider.chat/docs/usage/notifications.html) with `--notifications` flag.
|
||||
- Specify desktop notification command with `--notifications-command`.
|
||||
- Added support for QWQ 32B.
|
||||
- Switch to `tree-sitter-language-pack` for tree sitter support.
|
||||
- Improved error handling for EOF (Ctrl+D) in user input prompts.
|
||||
- Added helper function to ensure hex color values have a # prefix.
|
||||
- Fixed handling of Git errors when reading staged files.
|
||||
- Improved SSL verification control for model information requests.
|
||||
- Improved empty LLM response handling with clearer warning messages.
|
||||
- Fixed Git identity retrieval to respect global configuration, by Akira Komamura.
|
||||
- Offer to install dependencies for Bedrock and Vertex AI models.
|
||||
- Deprecated model shortcut args (like --4o, --opus) in favor of the --model flag.
|
||||
- Aider wrote 85% of the code in this release.
|
||||
|
||||
### Aider v0.75.3
|
||||
|
||||
- Support for V3 free on OpenRouter: `--model openrouter/deepseek/deepseek-chat:free`.
|
||||
|
||||
### Aider v0.75.2
|
||||
|
||||
- Added support for Claude 3.7 Sonnet models on OpenRouter, Bedrock and Vertex AI.
|
||||
- Updated default model to Claude 3.7 Sonnet on OpenRouter.
|
||||
- Added support for GPT-4.5-preview model.
|
||||
- Added support for Claude 3.7 Sonnet:beta on OpenRouter.
|
||||
- Fixed weak_model_name patterns to match main model name patterns for some models.
|
||||
|
||||
### Aider v0.75.1
|
||||
|
||||
- Added support for `openrouter/anthropic/claude-3.7-sonnet`
|
||||
|
||||
### Aider v0.75.0
|
||||
|
||||
- Basic support for Claude 3.7 Sonnet
|
||||
- Use `--model sonnet` to use the new 3.7
|
||||
- Thinking support coming soon.
|
||||
- Bugfix to `/editor` command.
|
||||
- Aider wrote 46% of the code in this release.
|
||||
|
||||
### Aider v0.74.3
|
||||
|
||||
- Downgrade streamlit dependency to avoid threading bug.
|
||||
- Added support for tree-sitter language pack.
|
||||
- Added openrouter/o3-mini-high model configuration.
|
||||
- Added build.gradle.kts to special files for Kotlin project support, by Lucas Shadler.
|
||||
|
||||
### Aider v0.74.2
|
||||
|
||||
- Prevent more than one cache warming thread from becoming active.
|
||||
|
||||
@@ -3723,7 +3723,7 @@
|
||||
Titusz Pan: 9
|
||||
start_tag: v0.71.0
|
||||
total_lines: 283
|
||||
- aider_percentage: 69.44
|
||||
- aider_percentage: 37.47
|
||||
aider_total: 284
|
||||
end_date: '2025-01-31'
|
||||
end_tag: v0.73.0
|
||||
@@ -3746,6 +3746,10 @@
|
||||
aider/models.py:
|
||||
Paul Gauthier: 8
|
||||
Paul Gauthier (aider): 33
|
||||
aider/resources/model-settings.yml:
|
||||
Paul Gauthier: 334
|
||||
kennyfrc: 11
|
||||
xqyz: 4
|
||||
aider/sendchat.py:
|
||||
Mir Adnan ALI: 28
|
||||
Paul Gauthier: 11
|
||||
@@ -3770,12 +3774,13 @@
|
||||
Paul Gauthier (aider): 77
|
||||
grand_total:
|
||||
Mir Adnan ALI: 28
|
||||
Paul Gauthier: 96
|
||||
Paul Gauthier: 430
|
||||
Paul Gauthier (aider): 284
|
||||
xqyz: 1
|
||||
kennyfrc: 11
|
||||
xqyz: 5
|
||||
start_tag: v0.72.0
|
||||
total_lines: 409
|
||||
- aider_percentage: 77.14
|
||||
total_lines: 758
|
||||
- aider_percentage: 76.07
|
||||
aider_total: 604
|
||||
end_date: '2025-02-06'
|
||||
end_tag: v0.74.0
|
||||
@@ -3813,6 +3818,8 @@
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 2
|
||||
"Viktor Sz\xE9pe": 3
|
||||
aider/resources/model-settings.yml:
|
||||
Paul Gauthier: 11
|
||||
aider/watch.py:
|
||||
Paul Gauthier (aider): 45
|
||||
benchmark/docker.sh:
|
||||
@@ -3839,8 +3846,174 @@
|
||||
Paul Gauthier: 4
|
||||
Paul Gauthier (aider): 42
|
||||
grand_total:
|
||||
Paul Gauthier: 176
|
||||
Paul Gauthier: 187
|
||||
Paul Gauthier (aider): 604
|
||||
"Viktor Sz\xE9pe": 3
|
||||
start_tag: v0.73.0
|
||||
total_lines: 783
|
||||
total_lines: 794
|
||||
- aider_percentage: 44.78
|
||||
aider_total: 163
|
||||
end_date: '2025-02-24'
|
||||
end_tag: v0.75.0
|
||||
file_counts:
|
||||
aider/__init__.py:
|
||||
Paul Gauthier: 1
|
||||
aider/args.py:
|
||||
Paul Gauthier: 7
|
||||
aider/coders/base_coder.py:
|
||||
Paul Gauthier: 12
|
||||
Paul Gauthier (aider): 4
|
||||
aider/commands.py:
|
||||
FeepingCreature (aider): 6
|
||||
aider/editor.py:
|
||||
Paul Gauthier: 7
|
||||
Paul Gauthier (aider): 5
|
||||
aider/io.py:
|
||||
Paul Gauthier: 3
|
||||
Paul Gauthier (aider): 4
|
||||
aider/linter.py:
|
||||
Paul Gauthier: 1
|
||||
aider/main.py:
|
||||
Paul Gauthier: 16
|
||||
aider/models.py:
|
||||
Paul Gauthier: 4
|
||||
aider/queries/tree-sitter-language-pack/javascript-tags.scm:
|
||||
Paul Gauthier: 5
|
||||
aider/queries/tree-sitter-languages/hcl-tags.scm:
|
||||
Paul Gauthier: 3
|
||||
Warren Krewenki: 74
|
||||
aider/queries/tree-sitter-languages/javascript-tags.scm:
|
||||
Paul Gauthier: 5
|
||||
aider/repomap.py:
|
||||
Paul Gauthier: 43
|
||||
Paul Gauthier (aider): 11
|
||||
aider/resources/model-settings.yml:
|
||||
Paul Gauthier: 12
|
||||
aider/special.py:
|
||||
Lucas Shadler: 1
|
||||
aider/website/docs/leaderboards/index.md:
|
||||
Paul Gauthier: 1
|
||||
benchmark/Dockerfile:
|
||||
Paul Gauthier (aider): 1
|
||||
benchmark/benchmark.py:
|
||||
Paul Gauthier: 4
|
||||
benchmark/cpp-test.sh:
|
||||
Paul Gauthier: 1
|
||||
scripts/blame.py:
|
||||
Paul Gauthier (aider): 2
|
||||
scripts/issues.py:
|
||||
Paul Gauthier (aider): 17
|
||||
tests/basic/test_coder.py:
|
||||
Paul Gauthier (aider): 18
|
||||
tests/basic/test_editor.py:
|
||||
Antti Kaihola: 1
|
||||
Paul Gauthier (aider): 41
|
||||
tests/basic/test_models.py:
|
||||
Paul Gauthier (aider): 1
|
||||
tests/basic/test_repomap.py:
|
||||
Paul Gauthier (aider): 1
|
||||
tests/fixtures/languages/hcl/test.tf:
|
||||
Paul Gauthier (aider): 52
|
||||
grand_total:
|
||||
Antti Kaihola: 1
|
||||
FeepingCreature (aider): 6
|
||||
Lucas Shadler: 1
|
||||
Paul Gauthier: 125
|
||||
Paul Gauthier (aider): 157
|
||||
Warren Krewenki: 74
|
||||
start_tag: v0.74.0
|
||||
total_lines: 364
|
||||
- aider_percentage: 84.75
|
||||
aider_total: 1589
|
||||
end_date: '2025-03-10'
|
||||
end_tag: v0.76.0
|
||||
file_counts:
|
||||
aider/__init__.py:
|
||||
Paul Gauthier: 1
|
||||
aider/args.py:
|
||||
Paul Gauthier: 2
|
||||
Paul Gauthier (aider): 25
|
||||
aider/args_formatter.py:
|
||||
Paul Gauthier: 4
|
||||
Paul Gauthier (aider): 3
|
||||
aider/coders/base_coder.py:
|
||||
Paul Gauthier: 54
|
||||
Paul Gauthier (aider): 29
|
||||
aider/deprecated.py:
|
||||
Paul Gauthier (aider): 107
|
||||
aider/io.py:
|
||||
Paul Gauthier: 7
|
||||
Paul Gauthier (aider): 127
|
||||
aider/main.py:
|
||||
Akira Komamura: 2
|
||||
Mattias: 1
|
||||
Paul Gauthier: 4
|
||||
Paul Gauthier (aider): 16
|
||||
aider/models.py:
|
||||
Paul Gauthier: 6
|
||||
Paul Gauthier (aider): 68
|
||||
aider/queries/tree-sitter-language-pack/csharp-tags.scm:
|
||||
Paul Gauthier: 14
|
||||
Paul Gauthier (aider): 12
|
||||
aider/reasoning_tags.py:
|
||||
Paul Gauthier: 14
|
||||
Paul Gauthier (aider): 68
|
||||
aider/repo.py:
|
||||
Akira Komamura: 1
|
||||
Paul Gauthier (aider): 4
|
||||
aider/repomap.py:
|
||||
Paul Gauthier: 9
|
||||
aider/resources/model-settings.yml:
|
||||
Paul Gauthier: 61
|
||||
Paul Gauthier (aider): 32
|
||||
gmoz22: 4
|
||||
aider/website/_includes/leaderboard.js:
|
||||
Paul Gauthier (aider): 48
|
||||
aider/website/docs/leaderboards/index.md:
|
||||
Paul Gauthier: 2
|
||||
benchmark/benchmark.py:
|
||||
Paul Gauthier: 1
|
||||
benchmark/problem_stats.py:
|
||||
Paul Gauthier (aider): 2
|
||||
docker/Dockerfile:
|
||||
Paul Gauthier: 1
|
||||
scripts/blame.py:
|
||||
Paul Gauthier: 1
|
||||
scripts/pip-compile.sh:
|
||||
Claudia Pellegrino: 10
|
||||
Paul Gauthier: 6
|
||||
Paul Gauthier (aider): 11
|
||||
scripts/update-history.py:
|
||||
Paul Gauthier: 1
|
||||
scripts/versionbump.py:
|
||||
Paul Gauthier: 4
|
||||
Paul Gauthier (aider): 64
|
||||
tests/basic/test_deprecated.py:
|
||||
Paul Gauthier: 10
|
||||
Paul Gauthier (aider): 130
|
||||
tests/basic/test_io.py:
|
||||
Paul Gauthier (aider): 54
|
||||
tests/basic/test_main.py:
|
||||
Paul Gauthier: 1
|
||||
Paul Gauthier (aider): 93
|
||||
tests/basic/test_model_info_manager.py:
|
||||
Paul Gauthier (aider): 72
|
||||
tests/basic/test_models.py:
|
||||
Paul Gauthier: 27
|
||||
Paul Gauthier (aider): 34
|
||||
tests/basic/test_reasoning.py:
|
||||
Paul Gauthier: 36
|
||||
Paul Gauthier (aider): 525
|
||||
tests/basic/test_repomap.py:
|
||||
Paul Gauthier: 2
|
||||
tests/basic/test_ssl_verification.py:
|
||||
Paul Gauthier (aider): 65
|
||||
grand_total:
|
||||
Akira Komamura: 3
|
||||
Claudia Pellegrino: 10
|
||||
Mattias: 1
|
||||
Paul Gauthier: 268
|
||||
Paul Gauthier (aider): 1589
|
||||
gmoz22: 4
|
||||
start_tag: v0.75.0
|
||||
total_lines: 1875
|
||||
|
||||
@@ -1,3 +1,29 @@
|
||||
- dirname: 2025-02-25-20-23-07--gemini-pro
|
||||
test_cases: 225
|
||||
model: gemini/gemini-2.0-pro-exp-02-05
|
||||
edit_format: whole
|
||||
commit_hash: 2fccd47
|
||||
pass_rate_1: 20.4
|
||||
pass_rate_2: 35.6
|
||||
pass_num_1: 46
|
||||
pass_num_2: 80
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 430
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 13
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 5
|
||||
total_tests: 225
|
||||
command: aider --model gemini/gemini-2.0-pro-exp-02-05
|
||||
date: 2025-02-25
|
||||
versions: 0.75.2.dev
|
||||
seconds_per_case: 34.8
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2024-12-21-18-41-18--polyglot-gpt-4o-mini
|
||||
test_cases: 225
|
||||
model: gpt-4o-mini-2024-07-18
|
||||
@@ -543,4 +569,162 @@
|
||||
date: 2025-01-21
|
||||
versions: 0.72.2.dev
|
||||
seconds_per_case: 24.2
|
||||
total_cost: 0.0000
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-02-15-19-51-22--chatgpt4o-feb15-diff
|
||||
test_cases: 223
|
||||
model: chatgpt-4o-latest (2025-02-15)
|
||||
edit_format: diff
|
||||
commit_hash: 108ce18-dirty
|
||||
pass_rate_1: 9.0
|
||||
pass_rate_2: 27.1
|
||||
pass_num_1: 20
|
||||
pass_num_2: 61
|
||||
percent_cases_well_formed: 93.3
|
||||
error_outputs: 66
|
||||
num_malformed_responses: 21
|
||||
num_with_malformed_responses: 15
|
||||
user_asks: 57
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model chatgpt-4o-latest
|
||||
date: 2025-02-15
|
||||
versions: 0.74.3.dev
|
||||
seconds_per_case: 12.4
|
||||
total_cost: 14.3703
|
||||
|
||||
- dirname: 2025-02-24-19-54-07--sonnet37-diff
|
||||
test_cases: 225
|
||||
model: claude-3-7-sonnet-20250219 (no thinking)
|
||||
edit_format: diff
|
||||
commit_hash: 75e9ee6
|
||||
pass_rate_1: 24.4
|
||||
pass_rate_2: 60.4
|
||||
pass_num_1: 55
|
||||
pass_num_2: 136
|
||||
percent_cases_well_formed: 93.3
|
||||
error_outputs: 16
|
||||
num_malformed_responses: 16
|
||||
num_with_malformed_responses: 15
|
||||
user_asks: 12
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 0
|
||||
total_tests: 225
|
||||
command: aider --model sonnet
|
||||
date: 2025-02-24
|
||||
versions: 0.74.4.dev
|
||||
seconds_per_case: 28.3
|
||||
total_cost: 17.7191
|
||||
|
||||
- dirname: 2025-02-24-21-47-23--sonnet37-diff-think-32k-64k
|
||||
test_cases: 225
|
||||
model: claude-3-7-sonnet-20250219 (32k thinking tokens)
|
||||
edit_format: diff
|
||||
commit_hash: 60d11a6, 93edbda
|
||||
pass_rate_1: 29.3
|
||||
pass_rate_2: 64.9
|
||||
pass_num_1: 66
|
||||
pass_num_2: 146
|
||||
percent_cases_well_formed: 97.8
|
||||
error_outputs: 66
|
||||
num_malformed_responses: 5
|
||||
num_with_malformed_responses: 5
|
||||
user_asks: 5
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 1
|
||||
total_tests: 225
|
||||
command: "aider --model anthropic/claude-3-7-sonnet-20250219 # plus yml config"
|
||||
date: 2025-02-24
|
||||
versions: 0.75.1.dev
|
||||
seconds_per_case: 105.2
|
||||
total_cost: 36.8343
|
||||
|
||||
- dirname: 2025-02-27-20-26-15--gpt45-diff3
|
||||
test_cases: 224
|
||||
model: gpt-4.5-preview
|
||||
edit_format: diff
|
||||
commit_hash: b462e55-dirty
|
||||
pass_rate_1: 22.3
|
||||
pass_rate_2: 44.9
|
||||
pass_num_1: 50
|
||||
pass_num_2: 101
|
||||
percent_cases_well_formed: 97.3
|
||||
error_outputs: 10
|
||||
num_malformed_responses: 8
|
||||
num_with_malformed_responses: 6
|
||||
user_asks: 15
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 1
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model openai/gpt-4.5-preview
|
||||
date: 2025-02-27
|
||||
versions: 0.75.2.dev
|
||||
seconds_per_case: 113.5
|
||||
total_cost: 183.1802
|
||||
|
||||
- dirname: 2025-03-06-17-40-24--qwq32b-diff-temp-topp-ex-sys-remind-user-for-real
|
||||
test_cases: 225
|
||||
model: QwQ-32B
|
||||
edit_format: diff
|
||||
commit_hash: 51d118f-dirty
|
||||
pass_rate_1: 8.0
|
||||
pass_rate_2: 20.9
|
||||
pass_num_1: 18
|
||||
pass_num_2: 47
|
||||
percent_cases_well_formed: 67.6
|
||||
error_outputs: 145
|
||||
num_malformed_responses: 143
|
||||
num_with_malformed_responses: 73
|
||||
user_asks: 17
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 1
|
||||
test_timeouts: 4
|
||||
total_tests: 225
|
||||
command: aider --model fireworks_ai/accounts/fireworks/models/qwq-32b
|
||||
date: 2025-03-06
|
||||
versions: 0.75.3.dev
|
||||
seconds_per_case: 228.6
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2025-03-07-15-11-27--qwq32b-arch-temp-topp-again
|
||||
test_cases: 225
|
||||
model: QwQ-32B + Qwen 2.5 Coder Instruct
|
||||
edit_format: architect
|
||||
commit_hash: 52162a5
|
||||
editor_model: fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct
|
||||
editor_edit_format: editor-diff
|
||||
pass_rate_1: 9.8
|
||||
pass_rate_2: 26.2
|
||||
pass_num_1: 22
|
||||
pass_num_2: 59
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 122
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 489
|
||||
lazy_comments: 8
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 1
|
||||
test_timeouts: 2
|
||||
total_tests: 225
|
||||
command: aider --model fireworks_ai/accounts/fireworks/models/qwq-32b --architect
|
||||
date: 2025-03-07
|
||||
versions: 0.75.3.dev
|
||||
seconds_per_case: 137.4
|
||||
total_cost: 0
|
||||
@@ -11,14 +11,14 @@ cd /to/your/project
|
||||
# Work with DeepSeek via DeepSeek's API
|
||||
aider --model deepseek --api-key deepseek=your-key-goes-here
|
||||
|
||||
# Work with Claude 3.5 Sonnet via Anthropic's API
|
||||
# Work with Claude 3.7 Sonnet via Anthropic's API
|
||||
aider --model sonnet --api-key anthropic=your-key-goes-here
|
||||
|
||||
# Work with GPT-4o via OpenAI's API
|
||||
aider --model gpt-4o --api-key openai=your-key-goes-here
|
||||
|
||||
# Work with Sonnet via OpenRouter's API
|
||||
aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here
|
||||
aider --model openrouter/anthropic/claude-3.7-sonnet --api-key openrouter=your-key-goes-here
|
||||
|
||||
# Work with DeepSeek via OpenRouter's API
|
||||
aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here
|
||||
|
||||
@@ -23,6 +23,16 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
return (label && label.includes(HIGHLIGHT_MODEL)) ? 'rgba(255, 99, 132, 1)' : 'rgba(54, 162, 235, 1)';
|
||||
},
|
||||
borderWidth: 1
|
||||
}, {
|
||||
label: 'Total Cost ($)',
|
||||
data: [],
|
||||
type: 'scatter',
|
||||
yAxisID: 'y1',
|
||||
backgroundColor: 'rgba(153, 102, 255, 1)',
|
||||
borderColor: '#fff',
|
||||
borderWidth: 1,
|
||||
pointRadius: 5,
|
||||
pointHoverRadius: 7
|
||||
}]
|
||||
};
|
||||
|
||||
@@ -32,7 +42,8 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
model: '{{ row.model }}',
|
||||
pass_rate: {{ row[pass_rate_field] }},
|
||||
percent_cases_well_formed: {{ row.percent_cases_well_formed }},
|
||||
edit_format: '{{ row.edit_format | default: "diff" }}'
|
||||
edit_format: '{{ row.edit_format | default: "diff" }}',
|
||||
total_cost: {{ row.total_cost | default: 0 }}
|
||||
});
|
||||
{% endfor %}
|
||||
|
||||
@@ -43,6 +54,7 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
displayedData = [];
|
||||
leaderboardData.labels = [];
|
||||
leaderboardData.datasets[0].data = [];
|
||||
leaderboardData.datasets[1].data = [];
|
||||
|
||||
allData.forEach(function(row, index) {
|
||||
var rowElement = document.getElementById('edit-row-' + index);
|
||||
@@ -53,6 +65,8 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
displayedData.push(row);
|
||||
leaderboardData.labels.push(row.model);
|
||||
leaderboardData.datasets[0].data.push(row.pass_rate);
|
||||
// Only include cost if it's not zero (placeholder for unknown)
|
||||
leaderboardData.datasets[1].data.push(row.total_cost > 0 ? row.total_cost : null);
|
||||
}
|
||||
});
|
||||
|
||||
@@ -111,10 +125,29 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
fillStyle: blueDiagonalPattern,
|
||||
strokeStyle: 'rgba(54, 162, 235, 1)',
|
||||
lineWidth: 1
|
||||
},
|
||||
{
|
||||
text: 'Total Cost ($)',
|
||||
fillStyle: 'rgba(153, 102, 255, 1)',
|
||||
strokeStyle: '#fff',
|
||||
lineWidth: 1,
|
||||
pointStyle: 'circle'
|
||||
}
|
||||
];
|
||||
}
|
||||
}
|
||||
},
|
||||
tooltip: {
|
||||
callbacks: {
|
||||
label: function(context) {
|
||||
const datasetLabel = context.dataset.label || '';
|
||||
const value = context.parsed.y;
|
||||
if (datasetLabel === 'Total Cost ($)') {
|
||||
return datasetLabel + ': $' + value.toFixed(2);
|
||||
}
|
||||
return datasetLabel + ': ' + value.toFixed(1) + '%';
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
scales: {
|
||||
@@ -125,6 +158,17 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
text: 'Percent completed correctly'
|
||||
}
|
||||
},
|
||||
y1: {
|
||||
beginAtZero: true,
|
||||
position: 'right',
|
||||
grid: {
|
||||
drawOnChartArea: false
|
||||
},
|
||||
title: {
|
||||
display: true,
|
||||
text: 'Total Cost ($)'
|
||||
}
|
||||
},
|
||||
x: {
|
||||
ticks: {
|
||||
callback: function(value, index) {
|
||||
@@ -173,6 +217,7 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
displayedData = [];
|
||||
leaderboardData.labels = [];
|
||||
leaderboardData.datasets[0].data = [];
|
||||
leaderboardData.datasets[1].data = [];
|
||||
|
||||
for (var i = 0; i < rows.length; i++) {
|
||||
var rowText = rows[i].textContent;
|
||||
@@ -181,6 +226,8 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
displayedData.push(allData[i]);
|
||||
leaderboardData.labels.push(allData[i].model);
|
||||
leaderboardData.datasets[0].data.push(allData[i].pass_rate);
|
||||
// Only include cost if it's not zero (placeholder for unknown)
|
||||
leaderboardData.datasets[1].data.push(allData[i].total_cost > 0 ? allData[i].total_cost : null);
|
||||
} else {
|
||||
rows[i].style.display = 'none';
|
||||
}
|
||||
|
||||
@@ -39,9 +39,7 @@ Aider will directly edit the code in your local source files,
|
||||
and [git commit the changes](https://aider.chat/docs/git.html)
|
||||
with sensible commit messages.
|
||||
You can start a new project or work with an existing git repo.
|
||||
Aider works well with GPT 3.5, GPT-4, GPT-4 Turbo with Vision,
|
||||
and Claude 3 Opus.
|
||||
It also supports [connecting to almost any LLM](https://aider.chat/docs/llms.html).
|
||||
{% include works-best.md %}
|
||||
|
||||
Use the `--browser` switch to launch the browser version of aider:
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -20,39 +20,6 @@
|
||||
## Specify the model to use for the main chat
|
||||
#model: xxx
|
||||
|
||||
## Use claude-3-opus-20240229 model for the main chat
|
||||
#opus: false
|
||||
|
||||
## Use claude-3-5-sonnet-20241022 model for the main chat
|
||||
#sonnet: false
|
||||
|
||||
## Use claude-3-5-haiku-20241022 model for the main chat
|
||||
#haiku: false
|
||||
|
||||
## Use gpt-4-0613 model for the main chat
|
||||
#4: false
|
||||
|
||||
## Use gpt-4o model for the main chat
|
||||
#4o: false
|
||||
|
||||
## Use gpt-4o-mini model for the main chat
|
||||
#mini: false
|
||||
|
||||
## Use gpt-4-1106-preview model for the main chat
|
||||
#4-turbo: false
|
||||
|
||||
## Use gpt-3.5-turbo model for the main chat
|
||||
#35turbo: false
|
||||
|
||||
## Use deepseek/deepseek-chat model for the main chat
|
||||
#deepseek: false
|
||||
|
||||
## Use o1-mini model for the main chat
|
||||
#o1-mini: false
|
||||
|
||||
## Use o1-preview model for the main chat
|
||||
#o1-preview: false
|
||||
|
||||
########################
|
||||
# API Keys and settings:
|
||||
|
||||
@@ -116,6 +83,9 @@
|
||||
## Set the reasoning_effort API parameter (default: not set)
|
||||
#reasoning-effort: xxx
|
||||
|
||||
## Set the thinking token budget for models that support it (default: not set)
|
||||
#thinking-tokens: xxx
|
||||
|
||||
## Verify the SSL cert when connecting to models (default: True)
|
||||
#verify-ssl: true
|
||||
|
||||
@@ -198,7 +168,7 @@
|
||||
#user-input-color: #00cc00
|
||||
|
||||
## Set the color for tool output (default: None)
|
||||
#tool-output-color: xxx
|
||||
#tool-output-color: "xxx"
|
||||
|
||||
## Set the color for tool error messages (default: #FF2222)
|
||||
#tool-error-color: #FF2222
|
||||
@@ -210,16 +180,16 @@
|
||||
#assistant-output-color: #0088ff
|
||||
|
||||
## Set the color for the completion menu (default: terminal's default text color)
|
||||
#completion-menu-color: xxx
|
||||
#completion-menu-color: "xxx"
|
||||
|
||||
## Set the background color for the completion menu (default: terminal's default background color)
|
||||
#completion-menu-bg-color: xxx
|
||||
#completion-menu-bg-color: "xxx"
|
||||
|
||||
## Set the color for the current item in the completion menu (default: terminal's default background color)
|
||||
#completion-menu-current-color: xxx
|
||||
#completion-menu-current-color: "xxx"
|
||||
|
||||
## Set the background color for the current item in the completion menu (default: terminal's default text color)
|
||||
#completion-menu-current-bg-color: xxx
|
||||
#completion-menu-current-bg-color: "xxx"
|
||||
|
||||
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light, or a Pygments builtin style, see https://pygments.org/styles for available themes)
|
||||
#code-theme: default
|
||||
@@ -431,8 +401,50 @@
|
||||
## Enable/disable multi-line input mode with Meta-Enter to submit (default: False)
|
||||
#multiline: false
|
||||
|
||||
## Enable/disable terminal bell notifications when LLM responses are ready (default: False)
|
||||
#notifications: false
|
||||
|
||||
## Specify a command to run for notifications instead of the terminal bell. If not specified, a default command for your OS may be used.
|
||||
#notifications-command: xxx
|
||||
|
||||
## Enable/disable detection and offering to add URLs to chat (default: True)
|
||||
#detect-urls: true
|
||||
|
||||
## Specify which editor to use for the /editor command
|
||||
#editor: xxx
|
||||
|
||||
############################
|
||||
# Deprecated model settings:
|
||||
|
||||
## Use claude-3-opus-20240229 model for the main chat (deprecated, use --model)
|
||||
#opus: false
|
||||
|
||||
## Use anthropic/claude-3-7-sonnet-20250219 model for the main chat (deprecated, use --model)
|
||||
#sonnet: false
|
||||
|
||||
## Use claude-3-5-haiku-20241022 model for the main chat (deprecated, use --model)
|
||||
#haiku: false
|
||||
|
||||
## Use gpt-4-0613 model for the main chat (deprecated, use --model)
|
||||
#4: false
|
||||
|
||||
## Use gpt-4o model for the main chat (deprecated, use --model)
|
||||
#4o: false
|
||||
|
||||
## Use gpt-4o-mini model for the main chat (deprecated, use --model)
|
||||
#mini: false
|
||||
|
||||
## Use gpt-4-1106-preview model for the main chat (deprecated, use --model)
|
||||
#4-turbo: false
|
||||
|
||||
## Use gpt-3.5-turbo model for the main chat (deprecated, use --model)
|
||||
#35turbo: false
|
||||
|
||||
## Use deepseek/deepseek-chat model for the main chat (deprecated, use --model)
|
||||
#deepseek: false
|
||||
|
||||
## Use o1-mini model for the main chat (deprecated, use --model)
|
||||
#o1-mini: false
|
||||
|
||||
## Use o1-preview model for the main chat (deprecated, use --model)
|
||||
#o1-preview: false
|
||||
|
||||
@@ -24,39 +24,6 @@
|
||||
## Specify the model to use for the main chat
|
||||
#AIDER_MODEL=
|
||||
|
||||
## Use claude-3-opus-20240229 model for the main chat
|
||||
#AIDER_OPUS=
|
||||
|
||||
## Use claude-3-5-sonnet-20241022 model for the main chat
|
||||
#AIDER_SONNET=
|
||||
|
||||
## Use claude-3-5-haiku-20241022 model for the main chat
|
||||
#AIDER_HAIKU=
|
||||
|
||||
## Use gpt-4-0613 model for the main chat
|
||||
#AIDER_4=
|
||||
|
||||
## Use gpt-4o model for the main chat
|
||||
#AIDER_4O=
|
||||
|
||||
## Use gpt-4o-mini model for the main chat
|
||||
#AIDER_MINI=
|
||||
|
||||
## Use gpt-4-1106-preview model for the main chat
|
||||
#AIDER_4_TURBO=
|
||||
|
||||
## Use gpt-3.5-turbo model for the main chat
|
||||
#AIDER_35TURBO=
|
||||
|
||||
## Use deepseek/deepseek-chat model for the main chat
|
||||
#AIDER_DEEPSEEK=
|
||||
|
||||
## Use o1-mini model for the main chat
|
||||
#AIDER_O1_MINI=
|
||||
|
||||
## Use o1-preview model for the main chat
|
||||
#AIDER_O1_PREVIEW=
|
||||
|
||||
########################
|
||||
# API Keys and settings:
|
||||
|
||||
@@ -105,6 +72,9 @@
|
||||
## Set the reasoning_effort API parameter (default: not set)
|
||||
#AIDER_REASONING_EFFORT=
|
||||
|
||||
## Set the thinking token budget for models that support it (default: not set)
|
||||
#AIDER_THINKING_TOKENS=
|
||||
|
||||
## Verify the SSL cert when connecting to models (default: True)
|
||||
#AIDER_VERIFY_SSL=true
|
||||
|
||||
@@ -399,8 +369,50 @@
|
||||
## Enable/disable multi-line input mode with Meta-Enter to submit (default: False)
|
||||
#AIDER_MULTILINE=false
|
||||
|
||||
## Enable/disable terminal bell notifications when LLM responses are ready (default: False)
|
||||
#AIDER_NOTIFICATIONS=false
|
||||
|
||||
## Specify a command to run for notifications instead of the terminal bell. If not specified, a default command for your OS may be used.
|
||||
#AIDER_NOTIFICATIONS_COMMAND=
|
||||
|
||||
## Enable/disable detection and offering to add URLs to chat (default: True)
|
||||
#AIDER_DETECT_URLS=true
|
||||
|
||||
## Specify which editor to use for the /editor command
|
||||
#AIDER_EDITOR=
|
||||
|
||||
############################
|
||||
# Deprecated model settings:
|
||||
|
||||
## Use claude-3-opus-20240229 model for the main chat (deprecated, use --model)
|
||||
#AIDER_OPUS=false
|
||||
|
||||
## Use anthropic/claude-3-7-sonnet-20250219 model for the main chat (deprecated, use --model)
|
||||
#AIDER_SONNET=false
|
||||
|
||||
## Use claude-3-5-haiku-20241022 model for the main chat (deprecated, use --model)
|
||||
#AIDER_HAIKU=false
|
||||
|
||||
## Use gpt-4-0613 model for the main chat (deprecated, use --model)
|
||||
#AIDER_4=false
|
||||
|
||||
## Use gpt-4o model for the main chat (deprecated, use --model)
|
||||
#AIDER_4O=false
|
||||
|
||||
## Use gpt-4o-mini model for the main chat (deprecated, use --model)
|
||||
#AIDER_MINI=false
|
||||
|
||||
## Use gpt-4-1106-preview model for the main chat (deprecated, use --model)
|
||||
#AIDER_4_TURBO=false
|
||||
|
||||
## Use gpt-3.5-turbo model for the main chat (deprecated, use --model)
|
||||
#AIDER_35TURBO=false
|
||||
|
||||
## Use deepseek/deepseek-chat model for the main chat (deprecated, use --model)
|
||||
#AIDER_DEEPSEEK=false
|
||||
|
||||
## Use o1-mini model for the main chat (deprecated, use --model)
|
||||
#AIDER_O1_MINI=false
|
||||
|
||||
## Use o1-preview model for the main chat (deprecated, use --model)
|
||||
#AIDER_O1_PREVIEW=false
|
||||
|
||||
BIN
aider/website/assets/thinking.jpg
Normal file
BIN
aider/website/assets/thinking.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 341 KiB |
@@ -172,6 +172,7 @@ cog.out("```\n")
|
||||
streaming: true
|
||||
editor_model_name: null
|
||||
editor_edit_format: null
|
||||
reasoning_tag: null
|
||||
remove_reasoning: null
|
||||
system_prompt_prefix: null
|
||||
|
||||
@@ -223,6 +224,32 @@ cog.out("```\n")
|
||||
editor_model_name: anthropic/claude-3-5-sonnet-20241022
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: anthropic/claude-3-7-sonnet-20250219
|
||||
edit_format: diff
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: anthropic/claude-3-7-sonnet-20250219
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: anthropic/claude-3-7-sonnet-latest
|
||||
edit_format: diff
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: anthropic/claude-3-7-sonnet-latest
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: anthropic/claude-3-haiku-20240307
|
||||
weak_model_name: anthropic/claude-3-haiku-20240307
|
||||
examples_as_sys_msg: true
|
||||
@@ -288,6 +315,58 @@ cog.out("```\n")
|
||||
editor_model_name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: bedrock_converse/anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock_converse/anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: bedrock_converse/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock_converse/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: claude-3-5-haiku-20241022
|
||||
edit_format: diff
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
@@ -324,6 +403,32 @@ cog.out("```\n")
|
||||
editor_model_name: claude-3-5-sonnet-20241022
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: claude-3-7-sonnet-20250219
|
||||
edit_format: diff
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: claude-3-7-sonnet-20250219
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: claude-3-7-sonnet-latest
|
||||
edit_format: diff
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: claude-3-7-sonnet-latest
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: claude-3-haiku-20240307
|
||||
weak_model_name: claude-3-haiku-20240307
|
||||
examples_as_sys_msg: true
|
||||
@@ -408,7 +513,7 @@ cog.out("```\n")
|
||||
use_temperature: false
|
||||
editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
|
||||
editor_edit_format: editor-diff
|
||||
remove_reasoning: think
|
||||
reasoning_tag: think
|
||||
|
||||
- name: fireworks_ai/accounts/fireworks/models/deepseek-v3
|
||||
edit_format: diff
|
||||
@@ -418,6 +523,19 @@ cog.out("```\n")
|
||||
extra_params:
|
||||
max_tokens: 128000
|
||||
|
||||
- name: fireworks_ai/accounts/fireworks/models/qwq-32b
|
||||
edit_format: diff
|
||||
weak_model_name: fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
max_tokens: 32000
|
||||
top_p: 0.95
|
||||
use_temperature: 0.6
|
||||
editor_model_name: fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct
|
||||
editor_edit_format: editor-diff
|
||||
reasoning_tag: think
|
||||
|
||||
- name: gemini/gemini-1.5-flash-002
|
||||
|
||||
- name: gemini/gemini-1.5-flash-exp-0827
|
||||
@@ -532,6 +650,16 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
|
||||
- name: gpt-4.5-preview
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
use_repo_map: true
|
||||
lazy: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: true
|
||||
editor_model_name: gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: gpt-4o
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
@@ -567,6 +695,18 @@ cog.out("```\n")
|
||||
weak_model_name: groq/llama3-8b-8192
|
||||
examples_as_sys_msg: true
|
||||
|
||||
- name: groq/qwen-qwq-32b
|
||||
edit_format: diff
|
||||
weak_model_name: groq/qwen-2.5-coder-32b
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
max_tokens: 128000
|
||||
top_p: 0.95
|
||||
use_temperature: 0.6
|
||||
editor_model_name: groq/qwen-2.5-coder-32b
|
||||
editor_edit_format: editor-diff
|
||||
reasoning_tag: think
|
||||
|
||||
- name: o1
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
@@ -603,6 +743,16 @@ cog.out("```\n")
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
|
||||
- name: openai/gpt-4.5-preview
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
use_repo_map: true
|
||||
lazy: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: true
|
||||
editor_model_name: openai/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: openai/gpt-4o
|
||||
edit_format: diff
|
||||
weak_model_name: gpt-4o-mini
|
||||
@@ -696,12 +846,50 @@ cog.out("```\n")
|
||||
editor_model_name: openrouter/anthropic/claude-3.5-sonnet:beta
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: openrouter/anthropic/claude-3.7-sonnet
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/anthropic/claude-3-5-haiku
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: openrouter/anthropic/claude-3.7-sonnet
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: openrouter/anthropic/claude-3.7-sonnet:beta
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/anthropic/claude-3-5-haiku
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: openrouter/anthropic/claude-3.7-sonnet
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: openrouter/deepseek/deepseek-chat
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
reminder: sys
|
||||
examples_as_sys_msg: true
|
||||
|
||||
- name: openrouter/deepseek/deepseek-chat:free
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/deepseek/deepseek-chat:free
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
max_tokens: 8192
|
||||
caches_by_default: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/deepseek/deepseek-chat:free
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: openrouter/deepseek/deepseek-coder
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
@@ -715,8 +903,8 @@ cog.out("```\n")
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
max_tokens: 8192
|
||||
include_reasoning: true
|
||||
caches_by_default: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/deepseek/deepseek-chat
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
@@ -796,6 +984,15 @@ cog.out("```\n")
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
|
||||
- name: openrouter/openai/o3-mini-high
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/openai/gpt-4o-mini
|
||||
use_repo_map: true
|
||||
use_temperature: false
|
||||
editor_model_name: openrouter/openai/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
system_prompt_prefix: 'Formatting re-enabled. '
|
||||
|
||||
- name: openrouter/qwen/qwen-2.5-coder-32b-instruct
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
|
||||
@@ -803,6 +1000,16 @@ cog.out("```\n")
|
||||
editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: vertex_ai-anthropic_models/vertex_ai/claude-3-7-sonnet@20250219
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
max_tokens: 64000
|
||||
editor_model_name: vertex_ai-anthropic_models/vertex_ai/claude-3-7-sonnet@20250219
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: vertex_ai/claude-3-5-haiku@20241022
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
@@ -830,6 +1037,16 @@ cog.out("```\n")
|
||||
editor_model_name: vertex_ai/claude-3-5-sonnet@20240620
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: vertex_ai/claude-3-7-sonnet@20250219
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
max_tokens: 64000
|
||||
editor_model_name: vertex_ai/claude-3-7-sonnet@20250219
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: vertex_ai/claude-3-opus@20240229
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
|
||||
@@ -74,39 +74,6 @@ cog.outl("```")
|
||||
## Specify the model to use for the main chat
|
||||
#model: xxx
|
||||
|
||||
## Use claude-3-opus-20240229 model for the main chat
|
||||
#opus: false
|
||||
|
||||
## Use claude-3-5-sonnet-20241022 model for the main chat
|
||||
#sonnet: false
|
||||
|
||||
## Use claude-3-5-haiku-20241022 model for the main chat
|
||||
#haiku: false
|
||||
|
||||
## Use gpt-4-0613 model for the main chat
|
||||
#4: false
|
||||
|
||||
## Use gpt-4o model for the main chat
|
||||
#4o: false
|
||||
|
||||
## Use gpt-4o-mini model for the main chat
|
||||
#mini: false
|
||||
|
||||
## Use gpt-4-1106-preview model for the main chat
|
||||
#4-turbo: false
|
||||
|
||||
## Use gpt-3.5-turbo model for the main chat
|
||||
#35turbo: false
|
||||
|
||||
## Use deepseek/deepseek-chat model for the main chat
|
||||
#deepseek: false
|
||||
|
||||
## Use o1-mini model for the main chat
|
||||
#o1-mini: false
|
||||
|
||||
## Use o1-preview model for the main chat
|
||||
#o1-preview: false
|
||||
|
||||
########################
|
||||
# API Keys and settings:
|
||||
|
||||
@@ -170,6 +137,9 @@ cog.outl("```")
|
||||
## Set the reasoning_effort API parameter (default: not set)
|
||||
#reasoning-effort: xxx
|
||||
|
||||
## Set the thinking token budget for models that support it (default: not set)
|
||||
#thinking-tokens: xxx
|
||||
|
||||
## Verify the SSL cert when connecting to models (default: True)
|
||||
#verify-ssl: true
|
||||
|
||||
@@ -252,7 +222,7 @@ cog.outl("```")
|
||||
#user-input-color: #00cc00
|
||||
|
||||
## Set the color for tool output (default: None)
|
||||
#tool-output-color: xxx
|
||||
#tool-output-color: "xxx"
|
||||
|
||||
## Set the color for tool error messages (default: #FF2222)
|
||||
#tool-error-color: #FF2222
|
||||
@@ -264,16 +234,16 @@ cog.outl("```")
|
||||
#assistant-output-color: #0088ff
|
||||
|
||||
## Set the color for the completion menu (default: terminal's default text color)
|
||||
#completion-menu-color: xxx
|
||||
#completion-menu-color: "xxx"
|
||||
|
||||
## Set the background color for the completion menu (default: terminal's default background color)
|
||||
#completion-menu-bg-color: xxx
|
||||
#completion-menu-bg-color: "xxx"
|
||||
|
||||
## Set the color for the current item in the completion menu (default: terminal's default background color)
|
||||
#completion-menu-current-color: xxx
|
||||
#completion-menu-current-color: "xxx"
|
||||
|
||||
## Set the background color for the current item in the completion menu (default: terminal's default text color)
|
||||
#completion-menu-current-bg-color: xxx
|
||||
#completion-menu-current-bg-color: "xxx"
|
||||
|
||||
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light, or a Pygments builtin style, see https://pygments.org/styles for available themes)
|
||||
#code-theme: default
|
||||
@@ -485,10 +455,52 @@ cog.outl("```")
|
||||
## Enable/disable multi-line input mode with Meta-Enter to submit (default: False)
|
||||
#multiline: false
|
||||
|
||||
## Enable/disable terminal bell notifications when LLM responses are ready (default: False)
|
||||
#notifications: false
|
||||
|
||||
## Specify a command to run for notifications instead of the terminal bell. If not specified, a default command for your OS may be used.
|
||||
#notifications-command: xxx
|
||||
|
||||
## Enable/disable detection and offering to add URLs to chat (default: True)
|
||||
#detect-urls: true
|
||||
|
||||
## Specify which editor to use for the /editor command
|
||||
#editor: xxx
|
||||
|
||||
############################
|
||||
# Deprecated model settings:
|
||||
|
||||
## Use claude-3-opus-20240229 model for the main chat (deprecated, use --model)
|
||||
#opus: false
|
||||
|
||||
## Use anthropic/claude-3-7-sonnet-20250219 model for the main chat (deprecated, use --model)
|
||||
#sonnet: false
|
||||
|
||||
## Use claude-3-5-haiku-20241022 model for the main chat (deprecated, use --model)
|
||||
#haiku: false
|
||||
|
||||
## Use gpt-4-0613 model for the main chat (deprecated, use --model)
|
||||
#4: false
|
||||
|
||||
## Use gpt-4o model for the main chat (deprecated, use --model)
|
||||
#4o: false
|
||||
|
||||
## Use gpt-4o-mini model for the main chat (deprecated, use --model)
|
||||
#mini: false
|
||||
|
||||
## Use gpt-4-1106-preview model for the main chat (deprecated, use --model)
|
||||
#4-turbo: false
|
||||
|
||||
## Use gpt-3.5-turbo model for the main chat (deprecated, use --model)
|
||||
#35turbo: false
|
||||
|
||||
## Use deepseek/deepseek-chat model for the main chat (deprecated, use --model)
|
||||
#deepseek: false
|
||||
|
||||
## Use o1-mini model for the main chat (deprecated, use --model)
|
||||
#o1-mini: false
|
||||
|
||||
## Use o1-preview model for the main chat (deprecated, use --model)
|
||||
#o1-preview: false
|
||||
```
|
||||
<!--[[[end]]]-->
|
||||
|
||||
@@ -64,39 +64,6 @@ cog.outl("```")
|
||||
## Specify the model to use for the main chat
|
||||
#AIDER_MODEL=
|
||||
|
||||
## Use claude-3-opus-20240229 model for the main chat
|
||||
#AIDER_OPUS=
|
||||
|
||||
## Use claude-3-5-sonnet-20241022 model for the main chat
|
||||
#AIDER_SONNET=
|
||||
|
||||
## Use claude-3-5-haiku-20241022 model for the main chat
|
||||
#AIDER_HAIKU=
|
||||
|
||||
## Use gpt-4-0613 model for the main chat
|
||||
#AIDER_4=
|
||||
|
||||
## Use gpt-4o model for the main chat
|
||||
#AIDER_4O=
|
||||
|
||||
## Use gpt-4o-mini model for the main chat
|
||||
#AIDER_MINI=
|
||||
|
||||
## Use gpt-4-1106-preview model for the main chat
|
||||
#AIDER_4_TURBO=
|
||||
|
||||
## Use gpt-3.5-turbo model for the main chat
|
||||
#AIDER_35TURBO=
|
||||
|
||||
## Use deepseek/deepseek-chat model for the main chat
|
||||
#AIDER_DEEPSEEK=
|
||||
|
||||
## Use o1-mini model for the main chat
|
||||
#AIDER_O1_MINI=
|
||||
|
||||
## Use o1-preview model for the main chat
|
||||
#AIDER_O1_PREVIEW=
|
||||
|
||||
########################
|
||||
# API Keys and settings:
|
||||
|
||||
@@ -145,6 +112,9 @@ cog.outl("```")
|
||||
## Set the reasoning_effort API parameter (default: not set)
|
||||
#AIDER_REASONING_EFFORT=
|
||||
|
||||
## Set the thinking token budget for models that support it (default: not set)
|
||||
#AIDER_THINKING_TOKENS=
|
||||
|
||||
## Verify the SSL cert when connecting to models (default: True)
|
||||
#AIDER_VERIFY_SSL=true
|
||||
|
||||
@@ -439,10 +409,52 @@ cog.outl("```")
|
||||
## Enable/disable multi-line input mode with Meta-Enter to submit (default: False)
|
||||
#AIDER_MULTILINE=false
|
||||
|
||||
## Enable/disable terminal bell notifications when LLM responses are ready (default: False)
|
||||
#AIDER_NOTIFICATIONS=false
|
||||
|
||||
## Specify a command to run for notifications instead of the terminal bell. If not specified, a default command for your OS may be used.
|
||||
#AIDER_NOTIFICATIONS_COMMAND=
|
||||
|
||||
## Enable/disable detection and offering to add URLs to chat (default: True)
|
||||
#AIDER_DETECT_URLS=true
|
||||
|
||||
## Specify which editor to use for the /editor command
|
||||
#AIDER_EDITOR=
|
||||
|
||||
############################
|
||||
# Deprecated model settings:
|
||||
|
||||
## Use claude-3-opus-20240229 model for the main chat (deprecated, use --model)
|
||||
#AIDER_OPUS=false
|
||||
|
||||
## Use anthropic/claude-3-7-sonnet-20250219 model for the main chat (deprecated, use --model)
|
||||
#AIDER_SONNET=false
|
||||
|
||||
## Use claude-3-5-haiku-20241022 model for the main chat (deprecated, use --model)
|
||||
#AIDER_HAIKU=false
|
||||
|
||||
## Use gpt-4-0613 model for the main chat (deprecated, use --model)
|
||||
#AIDER_4=false
|
||||
|
||||
## Use gpt-4o model for the main chat (deprecated, use --model)
|
||||
#AIDER_4O=false
|
||||
|
||||
## Use gpt-4o-mini model for the main chat (deprecated, use --model)
|
||||
#AIDER_MINI=false
|
||||
|
||||
## Use gpt-4-1106-preview model for the main chat (deprecated, use --model)
|
||||
#AIDER_4_TURBO=false
|
||||
|
||||
## Use gpt-3.5-turbo model for the main chat (deprecated, use --model)
|
||||
#AIDER_35TURBO=false
|
||||
|
||||
## Use deepseek/deepseek-chat model for the main chat (deprecated, use --model)
|
||||
#AIDER_DEEPSEEK=false
|
||||
|
||||
## Use o1-mini model for the main chat (deprecated, use --model)
|
||||
#AIDER_O1_MINI=false
|
||||
|
||||
## Use o1-preview model for the main chat (deprecated, use --model)
|
||||
#AIDER_O1_PREVIEW=false
|
||||
```
|
||||
<!--[[[end]]]-->
|
||||
|
||||
@@ -20,7 +20,8 @@ Multiple aliases can be defined by using the `--alias` option multiple times. Ea
|
||||
|
||||
## Configuration File
|
||||
|
||||
You can also define aliases in your [`.aider.conf.yml` file](https://aider.chat/docs/config/aider_conf.html):
|
||||
Of course,
|
||||
you can also define aliases in your [`.aider.conf.yml` file](https://aider.chat/docs/config/aider_conf.html):
|
||||
|
||||
```yaml
|
||||
alias:
|
||||
@@ -31,13 +32,35 @@ alias:
|
||||
|
||||
## Using Aliases
|
||||
|
||||
Once defined, you can use the alias instead of the full model name:
|
||||
Once defined, you can use the alias instead of the full model name from the command line:
|
||||
|
||||
```bash
|
||||
aider --model fast # Uses gpt-4o-mini
|
||||
aider --model smart # Uses o3-mini
|
||||
```
|
||||
|
||||
Or with the `/model` command in-chat:
|
||||
|
||||
```
|
||||
Aider v0.75.3
|
||||
Main model: anthropic/claude-3-7-sonnet-20250219 with diff edit format, prompt cache, infinite output
|
||||
Weak model: claude-3-5-sonnet-20241022
|
||||
Git repo: .git with 406 files
|
||||
Repo-map: using 4096 tokens, files refresh
|
||||
─────────────────────────────────────────────────────────────────────────────────────────────────────
|
||||
> /model fast
|
||||
|
||||
Aider v0.75.3
|
||||
Main model: gpt-4o-mini with diff edit format
|
||||
─────────────────────────────────────────────────────────────────────────────────────────────────────
|
||||
diff> /model smart
|
||||
|
||||
Aider v0.75.3
|
||||
Main model: o3-mini with diff edit format
|
||||
─────────────────────────────────────────────────────────────────────────────────────────────────────
|
||||
>
|
||||
```
|
||||
|
||||
## Built-in Aliases
|
||||
|
||||
Aider includes some built-in aliases for convenience:
|
||||
@@ -60,7 +83,7 @@ for alias, model in sorted(MODEL_ALIASES.items()):
|
||||
- `haiku`: claude-3-5-haiku-20241022
|
||||
- `opus`: claude-3-opus-20240229
|
||||
- `r1`: deepseek/deepseek-reasoner
|
||||
- `sonnet`: claude-3-5-sonnet-20241022
|
||||
- `sonnet`: anthropic/claude-3-7-sonnet-20250219
|
||||
<!--[[[end]]]-->
|
||||
|
||||
## Priority
|
||||
|
||||
@@ -22,18 +22,15 @@ from aider.args import get_md_help
|
||||
cog.out(get_md_help())
|
||||
]]]-->
|
||||
```
|
||||
usage: aider [-h] [--model] [--opus] [--sonnet] [--haiku] [--4]
|
||||
[--4o] [--mini] [--4-turbo] [--35turbo] [--deepseek]
|
||||
[--o1-mini] [--o1-preview] [--openai-api-key]
|
||||
[--anthropic-api-key] [--openai-api-base]
|
||||
[--openai-api-type] [--openai-api-version]
|
||||
[--openai-api-deployment-id] [--openai-organization-id]
|
||||
[--set-env] [--api-key] [--list-models]
|
||||
[--model-settings-file] [--model-metadata-file]
|
||||
[--alias] [--reasoning-effort]
|
||||
[--verify-ssl | --no-verify-ssl] [--timeout]
|
||||
[--edit-format] [--architect] [--weak-model]
|
||||
[--editor-model] [--editor-edit-format]
|
||||
usage: aider [-h] [--model] [--openai-api-key] [--anthropic-api-key]
|
||||
[--openai-api-base] [--openai-api-type]
|
||||
[--openai-api-version] [--openai-api-deployment-id]
|
||||
[--openai-organization-id] [--set-env] [--api-key]
|
||||
[--list-models] [--model-settings-file]
|
||||
[--model-metadata-file] [--alias] [--reasoning-effort]
|
||||
[--thinking-tokens] [--verify-ssl | --no-verify-ssl]
|
||||
[--timeout] [--edit-format] [--architect]
|
||||
[--weak-model] [--editor-model] [--editor-edit-format]
|
||||
[--show-model-warnings | --no-show-model-warnings]
|
||||
[--max-chat-history-tokens]
|
||||
[--cache-prompts | --no-cache-prompts]
|
||||
@@ -78,7 +75,11 @@ usage: aider [-h] [--model] [--opus] [--sonnet] [--haiku] [--4]
|
||||
[--suggest-shell-commands | --no-suggest-shell-commands]
|
||||
[--fancy-input | --no-fancy-input]
|
||||
[--multiline | --no-multiline]
|
||||
[--detect-urls | --no-detect-urls] [--editor]
|
||||
[--notifications | --no-notifications]
|
||||
[--notifications-command]
|
||||
[--detect-urls | --no-detect-urls] [--editor] [--opus]
|
||||
[--sonnet] [--haiku] [--4] [--4o] [--mini] [--4-turbo]
|
||||
[--35turbo] [--deepseek] [--o1-mini] [--o1-preview]
|
||||
|
||||
```
|
||||
|
||||
@@ -96,58 +97,6 @@ Aliases:
|
||||
Specify the model to use for the main chat
|
||||
Environment variable: `AIDER_MODEL`
|
||||
|
||||
### `--opus`
|
||||
Use claude-3-opus-20240229 model for the main chat
|
||||
Environment variable: `AIDER_OPUS`
|
||||
|
||||
### `--sonnet`
|
||||
Use claude-3-5-sonnet-20241022 model for the main chat
|
||||
Environment variable: `AIDER_SONNET`
|
||||
|
||||
### `--haiku`
|
||||
Use claude-3-5-haiku-20241022 model for the main chat
|
||||
Environment variable: `AIDER_HAIKU`
|
||||
|
||||
### `--4`
|
||||
Use gpt-4-0613 model for the main chat
|
||||
Environment variable: `AIDER_4`
|
||||
Aliases:
|
||||
- `--4`
|
||||
- `-4`
|
||||
|
||||
### `--4o`
|
||||
Use gpt-4o model for the main chat
|
||||
Environment variable: `AIDER_4O`
|
||||
|
||||
### `--mini`
|
||||
Use gpt-4o-mini model for the main chat
|
||||
Environment variable: `AIDER_MINI`
|
||||
|
||||
### `--4-turbo`
|
||||
Use gpt-4-1106-preview model for the main chat
|
||||
Environment variable: `AIDER_4_TURBO`
|
||||
|
||||
### `--35turbo`
|
||||
Use gpt-3.5-turbo model for the main chat
|
||||
Environment variable: `AIDER_35TURBO`
|
||||
Aliases:
|
||||
- `--35turbo`
|
||||
- `--35-turbo`
|
||||
- `--3`
|
||||
- `-3`
|
||||
|
||||
### `--deepseek`
|
||||
Use deepseek/deepseek-chat model for the main chat
|
||||
Environment variable: `AIDER_DEEPSEEK`
|
||||
|
||||
### `--o1-mini`
|
||||
Use o1-mini model for the main chat
|
||||
Environment variable: `AIDER_O1_MINI`
|
||||
|
||||
### `--o1-preview`
|
||||
Use o1-preview model for the main chat
|
||||
Environment variable: `AIDER_O1_PREVIEW`
|
||||
|
||||
## API Keys and settings:
|
||||
|
||||
### `--openai-api-key VALUE`
|
||||
@@ -215,6 +164,10 @@ Environment variable: `AIDER_ALIAS`
|
||||
Set the reasoning_effort API parameter (default: not set)
|
||||
Environment variable: `AIDER_REASONING_EFFORT`
|
||||
|
||||
### `--thinking-tokens VALUE`
|
||||
Set the thinking token budget for models that support it (default: not set)
|
||||
Environment variable: `AIDER_THINKING_TOKENS`
|
||||
|
||||
### `--verify-ssl`
|
||||
Verify the SSL cert when connecting to models (default: True)
|
||||
Default: True
|
||||
@@ -750,6 +703,18 @@ Aliases:
|
||||
- `--multiline`
|
||||
- `--no-multiline`
|
||||
|
||||
### `--notifications`
|
||||
Enable/disable terminal bell notifications when LLM responses are ready (default: False)
|
||||
Default: False
|
||||
Environment variable: `AIDER_NOTIFICATIONS`
|
||||
Aliases:
|
||||
- `--notifications`
|
||||
- `--no-notifications`
|
||||
|
||||
### `--notifications-command COMMAND`
|
||||
Specify a command to run for notifications instead of the terminal bell. If not specified, a default command for your OS may be used.
|
||||
Environment variable: `AIDER_NOTIFICATIONS_COMMAND`
|
||||
|
||||
### `--detect-urls`
|
||||
Enable/disable detection and offering to add URLs to chat (default: True)
|
||||
Default: True
|
||||
@@ -761,4 +726,69 @@ Aliases:
|
||||
### `--editor VALUE`
|
||||
Specify which editor to use for the /editor command
|
||||
Environment variable: `AIDER_EDITOR`
|
||||
|
||||
## Deprecated model settings:
|
||||
|
||||
### `--opus`
|
||||
Use claude-3-opus-20240229 model for the main chat (deprecated, use --model)
|
||||
Default: False
|
||||
Environment variable: `AIDER_OPUS`
|
||||
|
||||
### `--sonnet`
|
||||
Use anthropic/claude-3-7-sonnet-20250219 model for the main chat (deprecated, use --model)
|
||||
Default: False
|
||||
Environment variable: `AIDER_SONNET`
|
||||
|
||||
### `--haiku`
|
||||
Use claude-3-5-haiku-20241022 model for the main chat (deprecated, use --model)
|
||||
Default: False
|
||||
Environment variable: `AIDER_HAIKU`
|
||||
|
||||
### `--4`
|
||||
Use gpt-4-0613 model for the main chat (deprecated, use --model)
|
||||
Default: False
|
||||
Environment variable: `AIDER_4`
|
||||
Aliases:
|
||||
- `--4`
|
||||
- `-4`
|
||||
|
||||
### `--4o`
|
||||
Use gpt-4o model for the main chat (deprecated, use --model)
|
||||
Default: False
|
||||
Environment variable: `AIDER_4O`
|
||||
|
||||
### `--mini`
|
||||
Use gpt-4o-mini model for the main chat (deprecated, use --model)
|
||||
Default: False
|
||||
Environment variable: `AIDER_MINI`
|
||||
|
||||
### `--4-turbo`
|
||||
Use gpt-4-1106-preview model for the main chat (deprecated, use --model)
|
||||
Default: False
|
||||
Environment variable: `AIDER_4_TURBO`
|
||||
|
||||
### `--35turbo`
|
||||
Use gpt-3.5-turbo model for the main chat (deprecated, use --model)
|
||||
Default: False
|
||||
Environment variable: `AIDER_35TURBO`
|
||||
Aliases:
|
||||
- `--35turbo`
|
||||
- `--35-turbo`
|
||||
- `--3`
|
||||
- `-3`
|
||||
|
||||
### `--deepseek`
|
||||
Use deepseek/deepseek-chat model for the main chat (deprecated, use --model)
|
||||
Default: False
|
||||
Environment variable: `AIDER_DEEPSEEK`
|
||||
|
||||
### `--o1-mini`
|
||||
Use o1-mini model for the main chat (deprecated, use --model)
|
||||
Default: False
|
||||
Environment variable: `AIDER_O1_MINI`
|
||||
|
||||
### `--o1-preview`
|
||||
Use o1-preview model for the main chat (deprecated, use --model)
|
||||
Default: False
|
||||
Environment variable: `AIDER_O1_PREVIEW`
|
||||
<!--[[[end]]]-->
|
||||
|
||||
@@ -6,10 +6,58 @@ description: How to configure reasoning model settings from secondary providers.
|
||||
|
||||
# Reasoning models
|
||||
|
||||

|
||||
|
||||
## Reasoning effort
|
||||
|
||||
You can use the `--reasoning-effort` switch to control the reasoning effort
|
||||
of models which support this setting.
|
||||
This switch is useful for OpenAI's reasoning models.
|
||||
|
||||
You can also use the `--thinking-tokens` switch to request
|
||||
the model use a certain number of thinking tokens.
|
||||
This switch is useful for Sonnet 3.7.
|
||||
|
||||
|
||||
## Thinking tokens in XML tags
|
||||
|
||||
There is also a `reasoning_tag` setting, which takes the name of an XML tag
|
||||
that the model uses to wrap its reasoning/thinking output.
|
||||
|
||||
For example when using DeepSeek R1 from Fireworks, the reasoning comes back inside
|
||||
`<think>...</think>` tags, so aider's settings
|
||||
include `reasoning_tag: think`.
|
||||
|
||||
```
|
||||
<think>
|
||||
The user wants me to greet them!
|
||||
</think>
|
||||
|
||||
Hello!
|
||||
```
|
||||
|
||||
Aider will display the thinking/reasoning output,
|
||||
but it won't be used for file editing instructions, etc.
|
||||
Aider will rely on the non-thinking output for instructions on how to make code changes, etc.
|
||||
|
||||
```yaml
|
||||
- name: fireworks_ai/accounts/fireworks/models/deepseek-r1
|
||||
edit_format: diff
|
||||
weak_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
max_tokens: 160000
|
||||
use_temperature: false
|
||||
editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
|
||||
editor_edit_format: editor-diff
|
||||
reasoning_tag: think # <---
|
||||
```
|
||||
|
||||
## Reasoning model limitations
|
||||
|
||||
Many
|
||||
"reasoning" models have restrictions on how they can be used:
|
||||
they sometimes prohibit streaming, use of temperature and/or the system prompt.
|
||||
Some also support different levels of "reasoning effort".
|
||||
|
||||
Aider is configured to work properly with these models
|
||||
when served through major provider APIs.
|
||||
@@ -21,12 +69,7 @@ and see errors related to temperature or system prompt.
|
||||
Include settings for your new provider in `.aider.model.setting.yml` file
|
||||
at the root of your project or in your home directory.
|
||||
|
||||
## Reasoning effort
|
||||
|
||||
You can use the `--reasoning-effort` switch to control the reasoning effort
|
||||
of models which support this setting.
|
||||
|
||||
## Temperature, streaming and system prompt
|
||||
### Temperature, streaming and system prompt
|
||||
|
||||
You should find one of the existing model setting configuration entries
|
||||
for the model you are interested in, say o3-mini:
|
||||
@@ -63,28 +106,3 @@ settings for a different provider.
|
||||
editor_model_name: azure/gpt-4o
|
||||
editor_edit_format: editor-diff
|
||||
```
|
||||
|
||||
## Thinking tokens
|
||||
|
||||
There is also a `remove_reasoning` setting, which takes the name of a tag.
|
||||
This is used to remove everything inside that XML tag pair.
|
||||
|
||||
For example when using DeepSeek R1 from Fireworks, the reasoning comes back inside
|
||||
`<think>...</think>` tags, so aider's settings
|
||||
include `remove_reasoning: think` to remove that part of the response.
|
||||
|
||||
Aider will still *display* think reasoning output, it just won't use it
|
||||
to find file editing instructions, etc.
|
||||
|
||||
```yaml
|
||||
- name: fireworks_ai/accounts/fireworks/models/deepseek-r1
|
||||
edit_format: diff
|
||||
weak_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
max_tokens: 160000
|
||||
use_temperature: false
|
||||
editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
|
||||
editor_edit_format: editor-diff
|
||||
remove_reasoning: think # <---
|
||||
```
|
||||
|
||||
@@ -249,14 +249,12 @@ tr:hover { background-color: #f5f5f5; }
|
||||
</style>
|
||||
<table>
|
||||
<tr><th>Model Name</th><th class='right'>Total Tokens</th><th class='right'>Percent</th></tr>
|
||||
<tr><td>claude-3-5-sonnet-20241022</td><td class='right'>946,736</td><td class='right'>63.1%</td></tr>
|
||||
<tr><td>fireworks_ai/accounts/fireworks/models/deepseek-v3</td><td class='right'>273,005</td><td class='right'>18.2%</td></tr>
|
||||
<tr><td>deepseek/deepseek-chat</td><td class='right'>97,745</td><td class='right'>6.5%</td></tr>
|
||||
<tr><td>claude-3-5-haiku-20241022</td><td class='right'>69,203</td><td class='right'>4.6%</td></tr>
|
||||
<tr><td>fireworks_ai/accounts/fireworks/models/deepseek-r1</td><td class='right'>65,251</td><td class='right'>4.3%</td></tr>
|
||||
<tr><td>o3-mini</td><td class='right'>46,467</td><td class='right'>3.1%</td></tr>
|
||||
<tr><td>gemini/REDACTED</td><td class='right'>1,859</td><td class='right'>0.1%</td></tr>
|
||||
<tr><td>ollama_chat/REDACTED</td><td class='right'>309</td><td class='right'>0.0%</td></tr>
|
||||
<tr><td>anthropic/claude-3-7-sonnet-20250219</td><td class='right'>1,063,669</td><td class='right'>92.4%</td></tr>
|
||||
<tr><td>openrouter/deepseek/deepseek-r1</td><td class='right'>40,786</td><td class='right'>3.5%</td></tr>
|
||||
<tr><td>o3-mini</td><td class='right'>32,728</td><td class='right'>2.8%</td></tr>
|
||||
<tr><td>gpt-4o</td><td class='right'>8,092</td><td class='right'>0.7%</td></tr>
|
||||
<tr><td>groq/REDACTED</td><td class='right'>3,914</td><td class='right'>0.3%</td></tr>
|
||||
<tr><td>fireworks_ai/accounts/fireworks/models/deepseek-r1</td><td class='right'>1,873</td><td class='right'>0.2%</td></tr>
|
||||
</table>
|
||||
|
||||
{: .note :}
|
||||
|
||||
@@ -22,7 +22,7 @@ Here are
|
||||
You need an key from an API provider to work with most models:
|
||||
|
||||
- [OpenAI](https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key) provides o1, o3-mini, gpt-4o and other models. Note that paying for an API key is different than being a "ChatGPT" subscriber.
|
||||
- [Anthropic](https://docs.anthropic.com/claude/reference/getting-started-with-the-api) provides Claude 3.5 Sonnet and Haiku.
|
||||
- [Anthropic](https://docs.anthropic.com/claude/reference/getting-started-with-the-api) provides Claude 3.7 Sonnet and Haiku.
|
||||
- [DeepSeek](https://platform.deepseek.com/api_keys) provides DeepSeek R1 and DeepSeek Chat V3.
|
||||
- [OpenRouter](https://openrouter.ai/keys) allows you to access models from many providers using a single key.
|
||||
|
||||
|
||||
@@ -57,10 +57,10 @@ cog.out(get_supported_languages_md())
|
||||
|:--------:|:--------------:|:--------:|:------:|
|
||||
| bash | .bash | | ✓ |
|
||||
| c | .c | ✓ | ✓ |
|
||||
| c_sharp | .cs | ✓ | ✓ |
|
||||
| commonlisp | .cl | | ✓ |
|
||||
| cpp | .cc | ✓ | ✓ |
|
||||
| cpp | .cpp | ✓ | ✓ |
|
||||
| csharp | .cs | ✓ | ✓ |
|
||||
| css | .css | | ✓ |
|
||||
| dockerfile | .dockerfile | | ✓ |
|
||||
| dot | .dot | | ✓ |
|
||||
@@ -85,12 +85,11 @@ cog.out(get_supported_languages_md())
|
||||
| kotlin | .kt | ✓ | ✓ |
|
||||
| lua | .lua | | ✓ |
|
||||
| make | .mk | | ✓ |
|
||||
| markdown | .md | | ✓ |
|
||||
| objc | .m | | ✓ |
|
||||
| ocaml | .ml | ✓ | ✓ |
|
||||
| perl | .pl | | ✓ |
|
||||
| php | .php | ✓ | ✓ |
|
||||
| python | .py | ✓ | ✓ |
|
||||
| ql | .ql | ✓ | ✓ |
|
||||
| r | .R | | ✓ |
|
||||
| r | .r | | ✓ |
|
||||
| regex | .regex | | ✓ |
|
||||
|
||||
@@ -71,7 +71,7 @@ The model also has to successfully apply all its changes to the source file with
|
||||
<script>
|
||||
{% assign data_source = edit_sorted %}
|
||||
{% assign pass_rate_field = "pass_rate_2" %}
|
||||
{% assign highlight_model = "xxxxxxxxxxx" %}
|
||||
{% assign highlight_model = "xxxxxx" %}
|
||||
{% include leaderboard.js %}
|
||||
</script>
|
||||
<style>
|
||||
@@ -116,6 +116,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
|
||||
latest_mod_date = max(mod_dates)
|
||||
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
|
||||
]]]-->
|
||||
January 31, 2025.
|
||||
March 07, 2025.
|
||||
<!--[[[end]]]-->
|
||||
</p>
|
||||
|
||||
@@ -5,6 +5,15 @@ nav_order: 800
|
||||
|
||||
# Benchmark notes
|
||||
|
||||
## Notes on pricing
|
||||
|
||||
All pricing information is the cost to run the benchmark at the time it was
|
||||
run.
|
||||
Providers change their pricing, and every benchmark run ends up with a slightly
|
||||
different cost.
|
||||
Pricing is provided on a *best efforts* basis, and may not always be current
|
||||
or fully accurate.
|
||||
|
||||
## Notes on benchmarking results
|
||||
|
||||
The key benchmarking results are:
|
||||
|
||||
@@ -17,7 +17,7 @@ description: Aider can connect to most LLMs for AI pair programming.
|
||||
Aider works best with these models, which are skilled at editing code:
|
||||
|
||||
- [DeepSeek R1 and V3](/docs/llms/deepseek.html)
|
||||
- [Claude 3.5 Sonnet](/docs/llms/anthropic.html)
|
||||
- [Claude 3.7 Sonnet](/docs/llms/anthropic.html)
|
||||
- [OpenAI o1, o3-mini and GPT-4o](/docs/llms/openai.html)
|
||||
|
||||
|
||||
|
||||
@@ -19,11 +19,11 @@ python -m pip install -U aider-chat
|
||||
export ANTHROPIC_API_KEY=<key> # Mac/Linux
|
||||
setx ANTHROPIC_API_KEY <key> # Windows, restart shell after setx
|
||||
|
||||
# Aider uses Claude 3.5 Sonnet by default (or use --sonnet)
|
||||
# Aider uses Claude 3.7 Sonnet by default
|
||||
aider
|
||||
|
||||
# Claude 3 Opus
|
||||
aider --opus
|
||||
aider --model claude-3-opus-20240229
|
||||
|
||||
# List models available from Anthropic
|
||||
aider --list-models anthropic/
|
||||
@@ -39,3 +39,34 @@ with more generous rate limits.
|
||||
You can use `aider --model <model-name>` to use any other Anthropic model.
|
||||
For example, if you want to use a specific version of Opus
|
||||
you could do `aider --model claude-3-opus-20240229`.
|
||||
|
||||
## Thinking tokens
|
||||
|
||||
Aider can work with Sonnet 3.7's new thinking tokens, but does not ask Sonnet to use
|
||||
thinking tokens by default.
|
||||
|
||||
Enabling thinking currently requires manual configuration.
|
||||
You need to add the following to your `.aider.model.settings.yml`
|
||||
[model settings file](/docs/config/adv-model-settings.html#model-settings).
|
||||
Adjust the `budget_tokens` value to change the target number of thinking tokens.
|
||||
|
||||
```yaml
|
||||
- name: anthropic/claude-3-7-sonnet-20250219
|
||||
edit_format: diff
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
examples_as_sys_msg: true
|
||||
use_temperature: false
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
thinking:
|
||||
type: enabled
|
||||
budget_tokens: 32000 # Adjust this number
|
||||
cache_control: true
|
||||
editor_model_name: anthropic/claude-3-7-sonnet-20250219
|
||||
editor_edit_format: editor-diff
|
||||
```
|
||||
|
||||
More streamlined support will be coming soon.
|
||||
|
||||
@@ -11,6 +11,32 @@ You will need to have an AWS account with access to the Bedrock service.
|
||||
To configure Aider to use the Amazon Bedrock API, you need to set up your AWS credentials.
|
||||
This can be done using the AWS CLI or by setting environment variables.
|
||||
|
||||
## Select a Model from Amazon Bedrock
|
||||
|
||||
Before you can use a model through Amazon Bedrock, you must "enable" the model under the **Model
|
||||
Access** screen in the AWS Management Console.
|
||||
To find the `Model ID`, open the **Model Catalog** area in the Bedrock console, select the model
|
||||
you want to use, and the find the `modelId` property under the "Usage" heading.
|
||||
|
||||
### Bedrock Inference Profiles
|
||||
|
||||
Amazon Bedrock has added support for a new feature called [cross-region "inference profiles."](https://aws.amazon.com/about-aws/whats-new/2024/09/amazon-bedrock-knowledge-bases-cross-region-inference/)
|
||||
Some models hosted in Bedrock _only_ support these inference profiles.
|
||||
If you're using one of these models, then you will need to use the `Inference Profile ID`
|
||||
instead of the `Model ID` from the **Model Catalog** screen, in the AWS Management Console.
|
||||
For example, the Claude Sonnet 3.7 model, release in February 2025, exclusively supports
|
||||
inference through inference profiles. To use this model, you would use the
|
||||
`us.anthropic.claude-3-7-sonnet-20250219-v1:0` Inference Profile ID.
|
||||
In the Amazon Bedrock console, go to Inference and Assessment ➡️ Cross-region Inference
|
||||
to find the `Inference Profile ID` value.
|
||||
|
||||
If you attempt to use a `Model ID` for a model that exclusively supports the Inference Profile
|
||||
feature, you will receive an error message like the following:
|
||||
|
||||
> litellm.BadRequestError: BedrockException - b'{"message":"Invocation of model ID
|
||||
anthropic.claude-3-7-sonnet-20250219-v1:0 with on-demand throughput isn\xe2\x80\x99t supported. Retry your
|
||||
request with the ID or ARN of an inference profile that contains this model."}'
|
||||
|
||||
## AWS CLI Configuration
|
||||
|
||||
If you haven't already, install the [AWS CLI](https://aws.amazon.com/cli/) and configure it with your credentials:
|
||||
@@ -39,6 +65,16 @@ export AWS_PROFILE=your-profile
|
||||
You can add these to your
|
||||
[.env file](/docs/config/dotenv.html).
|
||||
|
||||
### Set Environment Variables with PowerShell
|
||||
|
||||
If you're using PowerShell on MacOS, Linux, or Windows, you can set the same AWS configuration environment variables with these commands.
|
||||
|
||||
```pwsh
|
||||
$env:AWS_ACCESS_KEY_ID = 'your_access_key'
|
||||
$env:AWS_SECRET_ACCESS_KEY = 'your_secret_key'
|
||||
$env:AWS_REGION = 'us-west-2' # Put whichever AWS region that you'd like, that the Bedrock service supports.
|
||||
```
|
||||
|
||||
## Install boto3
|
||||
|
||||
The AWS Bedrock provider requires the `boto3` package in order to function correctly:
|
||||
|
||||
@@ -16,6 +16,6 @@ export DEEPSEEK_API_KEY=<key> # Mac/Linux
|
||||
setx DEEPSEEK_API_KEY <key> # Windows, restart shell after setx
|
||||
|
||||
# Use DeepSeek Chat v3
|
||||
aider --deepseek
|
||||
aider --model deepseek/deepseek-chat
|
||||
```
|
||||
|
||||
|
||||
@@ -10,16 +10,18 @@ To use LM Studio:
|
||||
```
|
||||
python -m pip install -U aider-chat
|
||||
|
||||
export LM_STUDIO_API_KEY=<key> # Mac/Linux
|
||||
setx LM_STUDIO_API_KEY <key> # Windows, restart shell after setx
|
||||
# Must set a value here even if its a dummy value
|
||||
export LM_STUDIO_API_KEY=dummy-api-key # Mac/Linux
|
||||
setx LM_STUDIO_API_KEY dummy-api-key # Windows, restart shell after setx
|
||||
|
||||
export LM_STUDIO_API_BASE=<url> # Mac/Linux
|
||||
setx LM_STUDIO_API_BASE <url> # Windows, restart shell after setx
|
||||
# LM Studio default server URL is http://localhost:1234/v1
|
||||
export LM_STUDIO_API_BASE=http://localhost:1234/v1 # Mac/Linux
|
||||
setx LM_STUDIO_API_BASE http://localhost:1234/v1 # Windows, restart shell after setx
|
||||
|
||||
aider --model lm_studio/<your-model-name>
|
||||
```
|
||||
|
||||
|
||||
**Note:** Even though LM Studio doesn't require an API Key out of the box the `LM_STUDIO_API_KEY` must have a dummy value like `dummy-api-key` set or the client request will fail trying to send an empty `Bearer` token.
|
||||
|
||||
See the [model warnings](warnings.html)
|
||||
section for information on warnings which will occur
|
||||
|
||||
@@ -23,7 +23,7 @@ aider --model o3-mini --api-key openai=<key>
|
||||
aider --model o1-mini --api-key openai=<key>
|
||||
|
||||
# GPT-4o
|
||||
aider --4o --api-key openai=<key>
|
||||
aider --model gpt-4o --api-key openai=<key>
|
||||
|
||||
# List models available from OpenAI
|
||||
aider --list-models openai/
|
||||
|
||||
@@ -29,7 +29,7 @@ python -m pip install -U aider-chat
|
||||
export OPENROUTER_API_KEY=<key> # Mac/Linux
|
||||
setx OPENROUTER_API_KEY <key> # Windows, restart shell after setx
|
||||
|
||||
aider --model openrouter/anthropic/claude-3.5-sonnet
|
||||
aider --model openrouter/anthropic/claude-3.7-sonnet
|
||||
```
|
||||
|
||||
|
||||
@@ -44,17 +44,17 @@ to allow use of all models.
|
||||
OpenRouter often has multiple providers serving each model.
|
||||
You can control which OpenRouter providers are used for your requests in two ways:
|
||||
|
||||
1. By "ignoring" certain providers in your
|
||||
1. By "ignoring" certain providers in your
|
||||
[OpenRouter account settings](https://openrouter.ai/settings/preferences).
|
||||
This disables those named providers across all the models that you access via OpenRouter.
|
||||
|
||||
2. By configuring "provider routing" in a `.aider.model.settings.yml` file.
|
||||
|
||||
Place that file in your home directory or the root if your git project, with
|
||||
Place that file in your home directory or the root of your git project, with
|
||||
entries like this:
|
||||
|
||||
```yaml
|
||||
- name: openrouter/anthropic/claude-3.5-sonnet
|
||||
- name: openrouter/anthropic/claude-3.7-sonnet
|
||||
extra_params:
|
||||
extra_body:
|
||||
provider:
|
||||
|
||||
@@ -57,11 +57,17 @@ cog.out(model_list)
|
||||
]]]-->
|
||||
- anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
- anthropic.claude-3-5-sonnet-20241022-v2:0
|
||||
- anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
- claude-3-5-haiku-20241022
|
||||
- claude-3-5-haiku-latest
|
||||
- claude-3-5-sonnet-20240620
|
||||
- claude-3-5-sonnet-20241022
|
||||
- claude-3-5-sonnet-latest
|
||||
- claude-3-7-sonnet-20250219
|
||||
- claude-3-7-sonnet-latest
|
||||
- claude-3-haiku-20240307
|
||||
- claude-3-opus-20240229
|
||||
- claude-3-opus-latest
|
||||
- claude-3-sonnet-20240229
|
||||
- codestral/codestral-2405
|
||||
- codestral/codestral-latest
|
||||
@@ -93,15 +99,18 @@ cog.out(model_list)
|
||||
- mistral/pixtral-large-2411
|
||||
- mistral/pixtral-large-latest
|
||||
- openrouter/anthropic/claude-3.5-sonnet
|
||||
- openrouter/anthropic/claude-3.7-sonnet
|
||||
- openrouter/deepseek/deepseek-r1
|
||||
- us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
- us.anthropic.claude-3-5-sonnet-20241022-v2:0
|
||||
- us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
- vertex_ai/claude-3-5-haiku
|
||||
- vertex_ai/claude-3-5-haiku@20241022
|
||||
- vertex_ai/claude-3-5-sonnet
|
||||
- vertex_ai/claude-3-5-sonnet-v2
|
||||
- vertex_ai/claude-3-5-sonnet-v2@20241022
|
||||
- vertex_ai/claude-3-5-sonnet@20240620
|
||||
- vertex_ai/claude-3-7-sonnet@20250219
|
||||
- vertex_ai/claude-3-haiku
|
||||
- vertex_ai/claude-3-haiku@20240307
|
||||
- vertex_ai/claude-3-opus
|
||||
|
||||
@@ -35,8 +35,8 @@ Aider also sends the LLM a [map of your entire git repo](https://aider.chat/docs
|
||||
|
||||
## Use a more capable model
|
||||
|
||||
If possible try using GPT-4o, Claude 3.5 Sonnet, DeepSeek V3 or DeepSeek R1.
|
||||
They are the strongest and most capable models.
|
||||
If possible try using GPT-4o, o3-mini, Claude 3.7 Sonnet, DeepSeek V3 or DeepSeek R1.
|
||||
They are the strong and capable models.
|
||||
|
||||
Weaker models
|
||||
are more prone to
|
||||
|
||||
@@ -13,14 +13,14 @@ command line arguments, like this:
|
||||
# Work with DeepSeek via DeepSeek's API
|
||||
aider --model deepseek --api-key deepseek=your-key-goes-here
|
||||
|
||||
# Work with Claude 3.5 Sonnet via Anthropic's API
|
||||
# Work with Claude 3.7 Sonnet via Anthropic's API
|
||||
aider --model sonnet --api-key anthropic=your-key-goes-here
|
||||
|
||||
# Work with o3-mini via OpenAI's API
|
||||
aider --model o3-mini --api-key openai=your-key-goes-here
|
||||
|
||||
# Work with Sonnet via OpenRouter's API
|
||||
aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here
|
||||
aider --model openrouter/anthropic/claude-3.7-sonnet --api-key openrouter=your-key-goes-here
|
||||
|
||||
# Work with DeepSeek Chat V3 via OpenRouter's API
|
||||
aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here
|
||||
|
||||
@@ -71,7 +71,7 @@ relevant context from the rest of your repo.
|
||||
# o3-mini
|
||||
$ aider --model o3-mini --api-key openai=<key>
|
||||
|
||||
# Claude 3.5 Sonnet
|
||||
# Claude 3.7 Sonnet
|
||||
$ aider --model sonnet --api-key anthropic=<key>
|
||||
```
|
||||
|
||||
|
||||
@@ -41,8 +41,8 @@ Aider will directly edit the code in your local source files,
|
||||
and [git commit the changes](https://aider.chat/docs/git.html)
|
||||
with sensible commit messages.
|
||||
You can start a new project or work with an existing git repo.
|
||||
Aider works well with GPT 3.5, GPT-4, GPT-4 Turbo with Vision,
|
||||
and Claude 3 Opus.
|
||||
Aider works well with
|
||||
GPT-4o, Sonnet 3.7, and DeepSeek Chat V3 & R1.
|
||||
It also supports [connecting to almost any LLM](https://aider.chat/docs/llms.html).
|
||||
|
||||
Use the `--browser` switch to launch the browser version of aider:
|
||||
|
||||
@@ -11,7 +11,7 @@ You can add images and URLs to the aider chat.
|
||||
## Images
|
||||
|
||||
Aider supports working with image files for many vision-capable models
|
||||
like GPT-4o and Claude 3.5 Sonnet.
|
||||
like GPT-4o and Claude 3.7 Sonnet.
|
||||
Adding images to a chat can be helpful in many situations:
|
||||
|
||||
- Add screenshots of web pages or UIs that you want aider to build or modify.
|
||||
|
||||
87
aider/website/docs/usage/notifications.md
Normal file
87
aider/website/docs/usage/notifications.md
Normal file
@@ -0,0 +1,87 @@
|
||||
---
|
||||
title: Notifications
|
||||
highlight_image: /assets/notifications.jpg
|
||||
parent: Usage
|
||||
nav_order: 760
|
||||
description: Aider can notify you when it's waiting for your input.
|
||||
---
|
||||
|
||||
# Notifications
|
||||
|
||||
Aider can notify you when it's done working and is
|
||||
waiting for your input.
|
||||
This is especially useful for long-running operations or when you're multitasking.
|
||||
|
||||
## Usage
|
||||
|
||||
Enable notifications with the `--notifications` flag:
|
||||
|
||||
```bash
|
||||
aider --notifications
|
||||
```
|
||||
|
||||
When enabled, aider will notify you when the LLM has finished generating a response and is waiting for your input.
|
||||
|
||||
## OS-Specific Notifications
|
||||
|
||||
Aider automatically detects your operating system and uses an appropriate notification method:
|
||||
|
||||
- **macOS**: Uses `terminal-notifier` if available, falling back to AppleScript notifications
|
||||
- **Linux**: Uses `notify-send` or `zenity` if available
|
||||
- **Windows**: Uses PowerShell to display a message box
|
||||
|
||||
## Custom Notification Commands
|
||||
|
||||
You can specify a custom notification command with `--notifications-command`:
|
||||
|
||||
```bash
|
||||
aider --notifications-command "your-custom-command"
|
||||
```
|
||||
|
||||
For example, on macOS you might use:
|
||||
|
||||
```bash
|
||||
aider --notifications-command "say 'Aider is ready'"
|
||||
```
|
||||
|
||||
### Remote Notifications
|
||||
|
||||
For remote notifications you could use [Apprise](https://github.com/caronc/apprise),
|
||||
which is a cross-platform Python library for sending notifications to various services.
|
||||
|
||||
For example, we can use Apprise to send notifications to Slack:
|
||||
|
||||
```bash
|
||||
aider --notifications-command "apprise -b 'Aider is ready' 'slack://your-slack-webhook-token'"
|
||||
```
|
||||
|
||||
or Discord
|
||||
```bash
|
||||
aider --notifications-command "apprise -b 'Aider is ready' 'discord://your-discord-webhook-token'"
|
||||
```
|
||||
|
||||
or even to your phone via Pushbullet
|
||||
```bash
|
||||
aider --notifications-command "apprise -b 'Aider is ready' 'pbul://your-pushbullet-access-token'"
|
||||
```
|
||||
|
||||
See the Apprise GitHub page for more details on how to use and configure it.
|
||||
|
||||
## Configuration
|
||||
|
||||
You can add these settings to your configuration file:
|
||||
|
||||
```yaml
|
||||
# Enable notifications
|
||||
notifications: true
|
||||
|
||||
# Optional custom notification command
|
||||
notifications_command: "your-custom-command"
|
||||
```
|
||||
|
||||
Or in your `.env` file:
|
||||
|
||||
```
|
||||
AIDER_NOTIFICATIONS=true
|
||||
AIDER_NOTIFICATIONS_COMMAND=your-custom-command
|
||||
```
|
||||
@@ -33,7 +33,7 @@ cog.out(text)
|
||||
Aider lets you pair program with LLMs,
|
||||
to edit code in your local git repository.
|
||||
Start a new project or work with an existing code base.
|
||||
Aider works best with Claude 3.5 Sonnet, DeepSeek R1 & Chat V3, OpenAI o1, o3-mini & GPT-4o. Aider can [connect to almost any LLM, including local models](https://aider.chat/docs/llms.html).
|
||||
Aider works best with Claude 3.7 Sonnet, DeepSeek R1 & Chat V3, OpenAI o1, o3-mini & GPT-4o. Aider can [connect to almost any LLM, including local models](https://aider.chat/docs/llms.html).
|
||||
|
||||
<!--
|
||||
<p align="center">
|
||||
@@ -81,14 +81,14 @@ cd /to/your/project
|
||||
# Work with DeepSeek via DeepSeek's API
|
||||
aider --model deepseek --api-key deepseek=your-key-goes-here
|
||||
|
||||
# Work with Claude 3.5 Sonnet via Anthropic's API
|
||||
# Work with Claude 3.7 Sonnet via Anthropic's API
|
||||
aider --model sonnet --api-key anthropic=your-key-goes-here
|
||||
|
||||
# Work with GPT-4o via OpenAI's API
|
||||
aider --model gpt-4o --api-key openai=your-key-goes-here
|
||||
|
||||
# Work with Sonnet via OpenRouter's API
|
||||
aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here
|
||||
aider --model openrouter/anthropic/claude-3.7-sonnet --api-key openrouter=your-key-goes-here
|
||||
|
||||
# Work with DeepSeek via OpenRouter's API
|
||||
aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here
|
||||
@@ -122,7 +122,7 @@ Pair program with AI.
|
||||
- [Add images to the chat](https://aider.chat/docs/usage/images-urls.html) (GPT-4o, Claude 3.5 Sonnet, etc).
|
||||
- [Add URLs to the chat](https://aider.chat/docs/usage/images-urls.html) and aider will read their content.
|
||||
- [Code with your voice](https://aider.chat/docs/usage/voice.html).
|
||||
- Aider works best with Claude 3.5 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html).
|
||||
- Aider works best with Claude 3.7 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html).
|
||||
|
||||
|
||||
## Top tier performance
|
||||
|
||||
@@ -342,6 +342,7 @@ def main(
|
||||
LONG_TIMEOUT = 24 * 60 * 60
|
||||
sendchat.RETRY_TIMEOUT = LONG_TIMEOUT
|
||||
base_coder.RETRY_TIMEOUT = LONG_TIMEOUT
|
||||
models.RETRY_TIMEOUT = LONG_TIMEOUT
|
||||
|
||||
if threads == 1:
|
||||
all_results = []
|
||||
|
||||
@@ -15,7 +15,7 @@ HARD_SET_NUM = 3 # Number of models that defines the hard set threshold
|
||||
|
||||
def get_dirs_from_leaderboard():
|
||||
# Load the leaderboard data
|
||||
with open("aider/website/_data/edit_leaderboard.yml") as f:
|
||||
with open("aider/website/_data/polyglot_leaderboard.yml") as f:
|
||||
leaderboard = yaml.safe_load(f)
|
||||
return [(entry["dirname"], entry["model"]) for entry in leaderboard]
|
||||
|
||||
@@ -92,7 +92,7 @@ def analyze_exercise_solutions(dirs=None, topn=None, copy_hard_set=False):
|
||||
(
|
||||
entry["pass_rate_2"]
|
||||
for entry in yaml.safe_load(
|
||||
open("aider/website/_data/edit_leaderboard.yml")
|
||||
open("aider/website/_data/polyglot_leaderboard.yml")
|
||||
)
|
||||
if entry["dirname"] == dirname
|
||||
),
|
||||
|
||||
@@ -64,7 +64,7 @@ COPY . /tmp/aider
|
||||
|
||||
# Install dependencies as root
|
||||
RUN /venv/bin/python -m pip install --upgrade --no-cache-dir pip && \
|
||||
/venv/bin/python -m pip install --no-cache-dir /tmp/aider[playwright] boto3 \
|
||||
/venv/bin/python -m pip install --no-cache-dir /tmp/aider[playwright] boto3 google-cloud-aiplatform \
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu && \
|
||||
rm -rf /tmp/aider
|
||||
|
||||
|
||||
363
requirements.txt
363
requirements.txt
@@ -1,238 +1,395 @@
|
||||
#
|
||||
# This file is autogenerated by pip-compile with Python 3.12
|
||||
# by the following command:
|
||||
#
|
||||
# pip-compile --allow-unsafe --output-file=requirements.txt requirements/requirements.in
|
||||
#
|
||||
aiohappyeyeballs==2.4.6
|
||||
# via aiohttp
|
||||
aiohttp==3.11.12
|
||||
# via litellm
|
||||
# This file was autogenerated by uv via the following command:
|
||||
# uv pip compile --no-strip-extras --constraint=requirements/common-constraints.txt --output-file=tmp.requirements.txt requirements/requirements.in
|
||||
aiohappyeyeballs==2.5.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
aiohttp==3.11.13
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# litellm
|
||||
aiosignal==1.3.2
|
||||
# via aiohttp
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
annotated-types==0.7.0
|
||||
# via pydantic
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pydantic
|
||||
anyio==4.8.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# httpx
|
||||
# openai
|
||||
# watchfiles
|
||||
attrs==25.1.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
# jsonschema
|
||||
# referencing
|
||||
backoff==2.2.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
# posthog
|
||||
beautifulsoup4==4.13.3
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
certifi==2025.1.31
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# httpcore
|
||||
# httpx
|
||||
# requests
|
||||
cffi==1.17.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# sounddevice
|
||||
# soundfile
|
||||
charset-normalizer==3.4.1
|
||||
# via requests
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# requests
|
||||
click==8.1.8
|
||||
# via litellm
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# litellm
|
||||
configargparse==1.7
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
diff-match-patch==20241021
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
diskcache==5.6.3
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
distro==1.9.0
|
||||
# via openai
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# openai
|
||||
# posthog
|
||||
filelock==3.17.0
|
||||
# via huggingface-hub
|
||||
flake8==7.1.1
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# huggingface-hub
|
||||
flake8==7.1.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
frozenlist==1.5.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
# aiosignal
|
||||
fsspec==2025.2.0
|
||||
# via huggingface-hub
|
||||
fsspec==2025.3.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# huggingface-hub
|
||||
gitdb==4.0.12
|
||||
# via gitpython
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# gitpython
|
||||
gitpython==3.1.44
|
||||
# via -r requirements/requirements.in
|
||||
grep-ast==0.5.0
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
grep-ast==0.7.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
h11==0.14.0
|
||||
# via httpcore
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# httpcore
|
||||
httpcore==1.0.7
|
||||
# via httpx
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# httpx
|
||||
httpx==0.28.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# litellm
|
||||
# openai
|
||||
huggingface-hub==0.28.1
|
||||
# via tokenizers
|
||||
huggingface-hub==0.29.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# tokenizers
|
||||
idna==3.10
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# anyio
|
||||
# httpx
|
||||
# requests
|
||||
# yarl
|
||||
importlib-metadata==7.2.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
# litellm
|
||||
importlib-resources==6.5.2
|
||||
# via -r requirements/requirements.in
|
||||
jinja2==3.1.5
|
||||
# via litellm
|
||||
jiter==0.8.2
|
||||
# via openai
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
jinja2==3.1.6
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# litellm
|
||||
jiter==0.9.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# openai
|
||||
json5==0.10.0
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
jsonschema==4.23.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
# litellm
|
||||
jsonschema-specifications==2024.10.1
|
||||
# via jsonschema
|
||||
litellm==1.60.6
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# jsonschema
|
||||
litellm==1.63.5
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
markdown-it-py==3.0.0
|
||||
# via rich
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# rich
|
||||
markupsafe==3.0.2
|
||||
# via jinja2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# jinja2
|
||||
mccabe==0.7.0
|
||||
# via flake8
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# flake8
|
||||
mdurl==0.1.2
|
||||
# via markdown-it-py
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# markdown-it-py
|
||||
mixpanel==4.10.1
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
monotonic==1.6
|
||||
# via posthog
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# posthog
|
||||
multidict==6.1.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
# yarl
|
||||
networkx==3.2.1
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
numpy==1.26.4
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# -c requirements/common-constraints.txt
|
||||
# scipy
|
||||
# soundfile
|
||||
openai==1.61.1
|
||||
# via litellm
|
||||
openai==1.65.5
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# litellm
|
||||
packaging==24.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
# huggingface-hub
|
||||
pathspec==0.12.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
# grep-ast
|
||||
pexpect==4.9.0
|
||||
# via -r requirements/requirements.in
|
||||
pillow==10.4.0
|
||||
# via -r requirements/requirements.in
|
||||
posthog==3.11.0
|
||||
# via -r requirements/requirements.in
|
||||
prompt-toolkit==3.0.50
|
||||
# via -r requirements/requirements.in
|
||||
propcache==0.2.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
pillow==11.1.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
pip==25.0.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
posthog==3.19.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
prompt-toolkit==3.0.50
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
propcache==0.3.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
# yarl
|
||||
psutil==6.1.1
|
||||
# via -r requirements/requirements.in
|
||||
psutil==7.0.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
ptyprocess==0.7.0
|
||||
# via pexpect
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pexpect
|
||||
pycodestyle==2.12.1
|
||||
# via flake8
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# flake8
|
||||
pycparser==2.22
|
||||
# via cffi
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# cffi
|
||||
pydantic==2.10.6
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# litellm
|
||||
# openai
|
||||
pydantic-core==2.27.2
|
||||
# via pydantic
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pydantic
|
||||
pydub==0.25.1
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
pyflakes==3.2.0
|
||||
# via flake8
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# flake8
|
||||
pygments==2.19.1
|
||||
# via rich
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# rich
|
||||
pypandoc==1.15
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
pyperclip==1.9.0
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
python-dateutil==2.9.0.post0
|
||||
# via posthog
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# posthog
|
||||
python-dotenv==1.0.1
|
||||
# via litellm
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# litellm
|
||||
pyyaml==6.0.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
# huggingface-hub
|
||||
referencing==0.36.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# jsonschema
|
||||
# jsonschema-specifications
|
||||
regex==2024.11.6
|
||||
# via tiktoken
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# tiktoken
|
||||
requests==2.32.3
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# huggingface-hub
|
||||
# mixpanel
|
||||
# posthog
|
||||
# tiktoken
|
||||
rich==13.9.4
|
||||
# via -r requirements/requirements.in
|
||||
rpds-py==0.22.3
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
rpds-py==0.23.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# jsonschema
|
||||
# referencing
|
||||
scipy==1.13.1
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
six==1.17.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# mixpanel
|
||||
# posthog
|
||||
# python-dateutil
|
||||
smmap==5.0.2
|
||||
# via gitdb
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# gitdb
|
||||
sniffio==1.3.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# anyio
|
||||
# openai
|
||||
sounddevice==0.5.1
|
||||
# via -r requirements/requirements.in
|
||||
soundfile==0.13.1
|
||||
# via -r requirements/requirements.in
|
||||
soupsieve==2.6
|
||||
# via beautifulsoup4
|
||||
tiktoken==0.8.0
|
||||
# via litellm
|
||||
tokenizers==0.19.1
|
||||
socksio==1.0.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
sounddevice==0.5.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
soundfile==0.13.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
soupsieve==2.6
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# beautifulsoup4
|
||||
tiktoken==0.9.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# litellm
|
||||
tokenizers==0.21.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# litellm
|
||||
tqdm==4.67.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# huggingface-hub
|
||||
# openai
|
||||
tree-sitter==0.21.3
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# tree-sitter-languages
|
||||
tree-sitter-languages==1.10.2
|
||||
# via grep-ast
|
||||
# -c requirements/common-constraints.txt
|
||||
# tree-sitter-language-pack
|
||||
tree-sitter-c-sharp==0.23.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# tree-sitter-language-pack
|
||||
tree-sitter-embedded-template==0.23.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# tree-sitter-language-pack
|
||||
tree-sitter-language-pack==0.6.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# grep-ast
|
||||
tree-sitter-yaml==0.7.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# tree-sitter-language-pack
|
||||
typing-extensions==4.12.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# anyio
|
||||
# beautifulsoup4
|
||||
# huggingface-hub
|
||||
@@ -242,17 +399,25 @@ typing-extensions==4.12.2
|
||||
# referencing
|
||||
urllib3==2.3.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# mixpanel
|
||||
# requests
|
||||
watchfiles==1.0.4
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
wcwidth==0.2.13
|
||||
# via prompt-toolkit
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# prompt-toolkit
|
||||
yarl==1.18.3
|
||||
# via aiohttp
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
zipp==3.21.0
|
||||
# via importlib-metadata
|
||||
|
||||
# The following packages are considered to be unsafe in a requirements file:
|
||||
pip==25.0
|
||||
# via -r requirements/requirements.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# importlib-metadata
|
||||
|
||||
tree-sitter==0.23.2; python_version < "3.10"
|
||||
tree-sitter==0.24.0; python_version >= "3.10"
|
||||
|
||||
514
requirements/common-constraints.txt
Normal file
514
requirements/common-constraints.txt
Normal file
@@ -0,0 +1,514 @@
|
||||
# This file was autogenerated by uv via the following command:
|
||||
# uv pip compile --no-strip-extras --output-file=requirements/common-constraints.txt requirements/requirements.in requirements/requirements-browser.in requirements/requirements-dev.in requirements/requirements-help.in requirements/requirements-playwright.in
|
||||
aiohappyeyeballs==2.5.0
|
||||
# via aiohttp
|
||||
aiohttp==3.11.13
|
||||
# via
|
||||
# huggingface-hub
|
||||
# litellm
|
||||
# llama-index-core
|
||||
aiosignal==1.3.2
|
||||
# via aiohttp
|
||||
altair==5.5.0
|
||||
# via streamlit
|
||||
annotated-types==0.7.0
|
||||
# via pydantic
|
||||
anyio==4.8.0
|
||||
# via
|
||||
# httpx
|
||||
# openai
|
||||
# watchfiles
|
||||
attrs==25.1.0
|
||||
# via
|
||||
# aiohttp
|
||||
# jsonschema
|
||||
# referencing
|
||||
backoff==2.2.1
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# posthog
|
||||
beautifulsoup4==4.13.3
|
||||
# via -r requirements/requirements.in
|
||||
blinker==1.9.0
|
||||
# via streamlit
|
||||
build==1.2.2.post1
|
||||
# via pip-tools
|
||||
cachetools==5.5.2
|
||||
# via streamlit
|
||||
certifi==2025.1.31
|
||||
# via
|
||||
# httpcore
|
||||
# httpx
|
||||
# requests
|
||||
cffi==1.17.1
|
||||
# via
|
||||
# sounddevice
|
||||
# soundfile
|
||||
cfgv==3.4.0
|
||||
# via pre-commit
|
||||
charset-normalizer==3.4.1
|
||||
# via requests
|
||||
click==8.1.8
|
||||
# via
|
||||
# litellm
|
||||
# nltk
|
||||
# pip-tools
|
||||
# streamlit
|
||||
# typer
|
||||
codespell==2.4.1
|
||||
# via -r requirements/requirements-dev.in
|
||||
cogapp==3.4.1
|
||||
# via -r requirements/requirements-dev.in
|
||||
configargparse==1.7
|
||||
# via -r requirements/requirements.in
|
||||
contourpy==1.3.1
|
||||
# via matplotlib
|
||||
cycler==0.12.1
|
||||
# via matplotlib
|
||||
dataclasses-json==0.6.7
|
||||
# via llama-index-core
|
||||
deprecated==1.2.18
|
||||
# via llama-index-core
|
||||
diff-match-patch==20241021
|
||||
# via -r requirements/requirements.in
|
||||
dill==0.3.9
|
||||
# via
|
||||
# multiprocess
|
||||
# pathos
|
||||
dirtyjson==1.0.8
|
||||
# via llama-index-core
|
||||
diskcache==5.6.3
|
||||
# via -r requirements/requirements.in
|
||||
distlib==0.3.9
|
||||
# via virtualenv
|
||||
distro==1.9.0
|
||||
# via
|
||||
# openai
|
||||
# posthog
|
||||
filelock==3.17.0
|
||||
# via
|
||||
# huggingface-hub
|
||||
# torch
|
||||
# transformers
|
||||
# virtualenv
|
||||
filetype==1.2.0
|
||||
# via llama-index-core
|
||||
flake8==7.1.2
|
||||
# via -r requirements/requirements.in
|
||||
fonttools==4.56.0
|
||||
# via matplotlib
|
||||
frozenlist==1.5.0
|
||||
# via
|
||||
# aiohttp
|
||||
# aiosignal
|
||||
fsspec==2025.3.0
|
||||
# via
|
||||
# huggingface-hub
|
||||
# llama-index-core
|
||||
# torch
|
||||
gitdb==4.0.12
|
||||
# via gitpython
|
||||
gitpython==3.1.44
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# streamlit
|
||||
greenlet==3.1.1
|
||||
# via
|
||||
# playwright
|
||||
# sqlalchemy
|
||||
grep-ast==0.7.2
|
||||
# via -r requirements/requirements.in
|
||||
h11==0.14.0
|
||||
# via httpcore
|
||||
httpcore==1.0.7
|
||||
# via httpx
|
||||
httpx==0.28.1
|
||||
# via
|
||||
# litellm
|
||||
# llama-index-core
|
||||
# openai
|
||||
huggingface-hub[inference]==0.29.2
|
||||
# via
|
||||
# llama-index-embeddings-huggingface
|
||||
# sentence-transformers
|
||||
# tokenizers
|
||||
# transformers
|
||||
identify==2.6.9
|
||||
# via pre-commit
|
||||
idna==3.10
|
||||
# via
|
||||
# anyio
|
||||
# httpx
|
||||
# requests
|
||||
# yarl
|
||||
imgcat==0.6.0
|
||||
# via -r requirements/requirements-dev.in
|
||||
importlib-metadata==7.2.1
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# litellm
|
||||
importlib-resources==6.5.2
|
||||
# via -r requirements/requirements.in
|
||||
iniconfig==2.0.0
|
||||
# via pytest
|
||||
jinja2==3.1.6
|
||||
# via
|
||||
# altair
|
||||
# litellm
|
||||
# pydeck
|
||||
# torch
|
||||
jiter==0.9.0
|
||||
# via openai
|
||||
joblib==1.4.2
|
||||
# via
|
||||
# nltk
|
||||
# scikit-learn
|
||||
json5==0.10.0
|
||||
# via -r requirements/requirements.in
|
||||
jsonschema==4.23.0
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# altair
|
||||
# litellm
|
||||
jsonschema-specifications==2024.10.1
|
||||
# via jsonschema
|
||||
kiwisolver==1.4.8
|
||||
# via matplotlib
|
||||
litellm==1.63.5
|
||||
# via -r requirements/requirements.in
|
||||
llama-index-core==0.12.23.post2
|
||||
# via
|
||||
# -r requirements/requirements-help.in
|
||||
# llama-index-embeddings-huggingface
|
||||
llama-index-embeddings-huggingface==0.5.2
|
||||
# via -r requirements/requirements-help.in
|
||||
lox==0.13.0
|
||||
# via -r requirements/requirements-dev.in
|
||||
markdown-it-py==3.0.0
|
||||
# via rich
|
||||
markupsafe==3.0.2
|
||||
# via jinja2
|
||||
marshmallow==3.26.1
|
||||
# via dataclasses-json
|
||||
matplotlib==3.10.1
|
||||
# via -r requirements/requirements-dev.in
|
||||
mccabe==0.7.0
|
||||
# via flake8
|
||||
mdurl==0.1.2
|
||||
# via markdown-it-py
|
||||
mixpanel==4.10.1
|
||||
# via -r requirements/requirements.in
|
||||
monotonic==1.6
|
||||
# via posthog
|
||||
mpmath==1.3.0
|
||||
# via sympy
|
||||
multidict==6.1.0
|
||||
# via
|
||||
# aiohttp
|
||||
# yarl
|
||||
multiprocess==0.70.17
|
||||
# via pathos
|
||||
mypy-extensions==1.0.0
|
||||
# via typing-inspect
|
||||
narwhals==1.30.0
|
||||
# via altair
|
||||
nest-asyncio==1.6.0
|
||||
# via llama-index-core
|
||||
networkx==3.2.1
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# llama-index-core
|
||||
# torch
|
||||
nltk==3.9.1
|
||||
# via llama-index-core
|
||||
nodeenv==1.9.1
|
||||
# via pre-commit
|
||||
numpy==1.26.4
|
||||
# via
|
||||
# -r requirements/requirements-help.in
|
||||
# contourpy
|
||||
# llama-index-core
|
||||
# matplotlib
|
||||
# pandas
|
||||
# pydeck
|
||||
# scikit-learn
|
||||
# scipy
|
||||
# soundfile
|
||||
# streamlit
|
||||
# transformers
|
||||
openai==1.65.5
|
||||
# via litellm
|
||||
packaging==24.2
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# altair
|
||||
# build
|
||||
# huggingface-hub
|
||||
# marshmallow
|
||||
# matplotlib
|
||||
# pytest
|
||||
# streamlit
|
||||
# transformers
|
||||
pandas==2.2.3
|
||||
# via
|
||||
# -r requirements/requirements-dev.in
|
||||
# streamlit
|
||||
pathos==0.3.3
|
||||
# via lox
|
||||
pathspec==0.12.1
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# grep-ast
|
||||
pexpect==4.9.0
|
||||
# via -r requirements/requirements.in
|
||||
pillow==11.1.0
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# llama-index-core
|
||||
# matplotlib
|
||||
# sentence-transformers
|
||||
# streamlit
|
||||
pip==25.0.1
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# pip-tools
|
||||
pip-tools==7.4.1
|
||||
# via -r requirements/requirements-dev.in
|
||||
platformdirs==4.3.6
|
||||
# via virtualenv
|
||||
playwright==1.50.0
|
||||
# via -r requirements/requirements-playwright.in
|
||||
pluggy==1.5.0
|
||||
# via pytest
|
||||
posthog==3.19.1
|
||||
# via -r requirements/requirements.in
|
||||
pox==0.3.5
|
||||
# via pathos
|
||||
ppft==1.7.6.9
|
||||
# via pathos
|
||||
pre-commit==4.1.0
|
||||
# via -r requirements/requirements-dev.in
|
||||
prompt-toolkit==3.0.50
|
||||
# via -r requirements/requirements.in
|
||||
propcache==0.3.0
|
||||
# via
|
||||
# aiohttp
|
||||
# yarl
|
||||
protobuf==5.29.3
|
||||
# via streamlit
|
||||
psutil==7.0.0
|
||||
# via -r requirements/requirements.in
|
||||
ptyprocess==0.7.0
|
||||
# via pexpect
|
||||
pyarrow==19.0.1
|
||||
# via streamlit
|
||||
pycodestyle==2.12.1
|
||||
# via flake8
|
||||
pycparser==2.22
|
||||
# via cffi
|
||||
pydantic==2.10.6
|
||||
# via
|
||||
# litellm
|
||||
# llama-index-core
|
||||
# openai
|
||||
pydantic-core==2.27.2
|
||||
# via pydantic
|
||||
pydeck==0.9.1
|
||||
# via streamlit
|
||||
pydub==0.25.1
|
||||
# via -r requirements/requirements.in
|
||||
pyee==12.1.1
|
||||
# via playwright
|
||||
pyflakes==3.2.0
|
||||
# via flake8
|
||||
pygments==2.19.1
|
||||
# via rich
|
||||
pypandoc==1.15
|
||||
# via -r requirements/requirements.in
|
||||
pyparsing==3.2.1
|
||||
# via matplotlib
|
||||
pyperclip==1.9.0
|
||||
# via -r requirements/requirements.in
|
||||
pyproject-hooks==1.2.0
|
||||
# via
|
||||
# build
|
||||
# pip-tools
|
||||
pytest==8.3.5
|
||||
# via
|
||||
# -r requirements/requirements-dev.in
|
||||
# pytest-env
|
||||
pytest-env==1.1.5
|
||||
# via -r requirements/requirements-dev.in
|
||||
python-dateutil==2.9.0.post0
|
||||
# via
|
||||
# matplotlib
|
||||
# pandas
|
||||
# posthog
|
||||
python-dotenv==1.0.1
|
||||
# via litellm
|
||||
pytz==2025.1
|
||||
# via pandas
|
||||
pyyaml==6.0.2
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# huggingface-hub
|
||||
# llama-index-core
|
||||
# pre-commit
|
||||
# transformers
|
||||
referencing==0.36.2
|
||||
# via
|
||||
# jsonschema
|
||||
# jsonschema-specifications
|
||||
regex==2024.11.6
|
||||
# via
|
||||
# nltk
|
||||
# tiktoken
|
||||
# transformers
|
||||
requests==2.32.3
|
||||
# via
|
||||
# huggingface-hub
|
||||
# llama-index-core
|
||||
# mixpanel
|
||||
# posthog
|
||||
# streamlit
|
||||
# tiktoken
|
||||
# transformers
|
||||
rich==13.9.4
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# typer
|
||||
rpds-py==0.23.1
|
||||
# via
|
||||
# jsonschema
|
||||
# referencing
|
||||
safetensors==0.5.3
|
||||
# via transformers
|
||||
scikit-learn==1.6.1
|
||||
# via sentence-transformers
|
||||
scipy==1.13.1
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# scikit-learn
|
||||
# sentence-transformers
|
||||
semver==3.0.4
|
||||
# via -r requirements/requirements-dev.in
|
||||
sentence-transformers==3.4.1
|
||||
# via llama-index-embeddings-huggingface
|
||||
setuptools==76.0.0
|
||||
# via pip-tools
|
||||
shellingham==1.5.4
|
||||
# via typer
|
||||
six==1.17.0
|
||||
# via
|
||||
# mixpanel
|
||||
# posthog
|
||||
# python-dateutil
|
||||
smmap==5.0.2
|
||||
# via gitdb
|
||||
sniffio==1.3.1
|
||||
# via
|
||||
# anyio
|
||||
# openai
|
||||
socksio==1.0.0
|
||||
# via -r requirements/requirements.in
|
||||
sounddevice==0.5.1
|
||||
# via -r requirements/requirements.in
|
||||
soundfile==0.13.1
|
||||
# via -r requirements/requirements.in
|
||||
soupsieve==2.6
|
||||
# via beautifulsoup4
|
||||
sqlalchemy[asyncio]==2.0.38
|
||||
# via llama-index-core
|
||||
streamlit==1.43.1
|
||||
# via -r requirements/requirements-browser.in
|
||||
sympy==1.13.3
|
||||
# via torch
|
||||
tenacity==9.0.0
|
||||
# via
|
||||
# llama-index-core
|
||||
# streamlit
|
||||
threadpoolctl==3.5.0
|
||||
# via scikit-learn
|
||||
tiktoken==0.9.0
|
||||
# via
|
||||
# litellm
|
||||
# llama-index-core
|
||||
tokenizers==0.21.0
|
||||
# via
|
||||
# litellm
|
||||
# transformers
|
||||
toml==0.10.2
|
||||
# via streamlit
|
||||
torch==2.2.2
|
||||
# via
|
||||
# -r requirements/requirements-help.in
|
||||
# sentence-transformers
|
||||
tornado==6.4.2
|
||||
# via streamlit
|
||||
tqdm==4.67.1
|
||||
# via
|
||||
# huggingface-hub
|
||||
# llama-index-core
|
||||
# nltk
|
||||
# openai
|
||||
# sentence-transformers
|
||||
# transformers
|
||||
transformers==4.49.0
|
||||
# via sentence-transformers
|
||||
tree-sitter==0.24.0
|
||||
# via tree-sitter-language-pack
|
||||
tree-sitter-c-sharp==0.23.1
|
||||
# via tree-sitter-language-pack
|
||||
tree-sitter-embedded-template==0.23.2
|
||||
# via tree-sitter-language-pack
|
||||
tree-sitter-language-pack==0.6.0
|
||||
# via grep-ast
|
||||
tree-sitter-yaml==0.7.0
|
||||
# via tree-sitter-language-pack
|
||||
typer==0.15.2
|
||||
# via -r requirements/requirements-dev.in
|
||||
typing-extensions==4.12.2
|
||||
# via
|
||||
# altair
|
||||
# anyio
|
||||
# beautifulsoup4
|
||||
# huggingface-hub
|
||||
# llama-index-core
|
||||
# openai
|
||||
# pydantic
|
||||
# pydantic-core
|
||||
# pyee
|
||||
# referencing
|
||||
# sqlalchemy
|
||||
# streamlit
|
||||
# torch
|
||||
# typer
|
||||
# typing-inspect
|
||||
typing-inspect==0.9.0
|
||||
# via
|
||||
# dataclasses-json
|
||||
# llama-index-core
|
||||
tzdata==2025.1
|
||||
# via pandas
|
||||
urllib3==2.3.0
|
||||
# via
|
||||
# mixpanel
|
||||
# requests
|
||||
uv==0.6.5
|
||||
# via -r requirements/requirements-dev.in
|
||||
virtualenv==20.29.3
|
||||
# via pre-commit
|
||||
watchfiles==1.0.4
|
||||
# via -r requirements/requirements.in
|
||||
wcwidth==0.2.13
|
||||
# via prompt-toolkit
|
||||
wheel==0.45.1
|
||||
# via pip-tools
|
||||
wrapt==1.17.2
|
||||
# via
|
||||
# deprecated
|
||||
# llama-index-core
|
||||
yarl==1.18.3
|
||||
# via aiohttp
|
||||
zipp==3.21.0
|
||||
# via importlib-metadata
|
||||
@@ -1,4 +1 @@
|
||||
-c ../requirements.txt
|
||||
|
||||
streamlit
|
||||
watchdog<5 # docker build fails: streamlit 1.38.0 depends on watchdog<5
|
||||
|
||||
@@ -1,214 +1,155 @@
|
||||
#
|
||||
# This file is autogenerated by pip-compile with Python 3.12
|
||||
# by the following command:
|
||||
#
|
||||
# pip-compile --allow-unsafe --constraint=requirements.txt --constraint=requirements/requirements-dev.txt --constraint=requirements/requirements-help.txt --output-file=requirements/requirements-browser.txt requirements/requirements-browser.in
|
||||
#
|
||||
# This file was autogenerated by uv via the following command:
|
||||
# uv pip compile --no-strip-extras --constraint=requirements/common-constraints.txt --output-file=requirements/requirements-browser.txt requirements/requirements-browser.in
|
||||
altair==5.5.0
|
||||
# via streamlit
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
attrs==25.1.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# jsonschema
|
||||
# referencing
|
||||
blinker==1.9.0
|
||||
# via streamlit
|
||||
cachetools==5.5.1
|
||||
# via streamlit
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
cachetools==5.5.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
certifi==2025.1.31
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# requests
|
||||
charset-normalizer==3.4.1
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# requests
|
||||
click==8.1.8
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
gitdb==4.0.12
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# gitpython
|
||||
gitpython==3.1.44
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
idna==3.10
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# requests
|
||||
jinja2==3.1.5
|
||||
jinja2==3.1.6
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# altair
|
||||
# pydeck
|
||||
jsonschema==4.23.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# altair
|
||||
jsonschema-specifications==2024.10.1
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# jsonschema
|
||||
markdown-it-py==3.0.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# rich
|
||||
markupsafe==3.0.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# jinja2
|
||||
mdurl==0.1.2
|
||||
narwhals==1.30.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# markdown-it-py
|
||||
narwhals==1.25.2
|
||||
# via altair
|
||||
# -c requirements/common-constraints.txt
|
||||
# altair
|
||||
numpy==1.26.4
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# pandas
|
||||
# pydeck
|
||||
# streamlit
|
||||
packaging==24.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# altair
|
||||
# streamlit
|
||||
pandas==2.2.3
|
||||
# via
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
pillow==10.4.0
|
||||
pillow==11.1.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
protobuf==5.29.3
|
||||
# via streamlit
|
||||
pyarrow==19.0.0
|
||||
# via streamlit
|
||||
pydeck==0.9.1
|
||||
# via streamlit
|
||||
pygments==2.19.1
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# rich
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
pyarrow==19.0.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
pydeck==0.9.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
python-dateutil==2.9.0.post0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# pandas
|
||||
pytz==2025.1
|
||||
# via
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# pandas
|
||||
referencing==0.36.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# jsonschema
|
||||
# jsonschema-specifications
|
||||
requests==2.32.3
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
rich==13.9.4
|
||||
rpds-py==0.23.1
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# streamlit
|
||||
rpds-py==0.22.3
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# jsonschema
|
||||
# referencing
|
||||
six==1.17.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# python-dateutil
|
||||
smmap==5.0.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# gitdb
|
||||
streamlit==1.42.0
|
||||
# via -r requirements/requirements-browser.in
|
||||
streamlit==1.43.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-browser.in
|
||||
tenacity==9.0.0
|
||||
# via
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
toml==0.10.2
|
||||
# via streamlit
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
tornado==6.4.2
|
||||
# via streamlit
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# streamlit
|
||||
typing-extensions==4.12.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# altair
|
||||
# referencing
|
||||
# streamlit
|
||||
tzdata==2025.1
|
||||
# via
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# pandas
|
||||
urllib3==2.3.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# requests
|
||||
watchdog==4.0.2
|
||||
# via -r requirements/requirements-browser.in
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
-c ../requirements.txt
|
||||
#
|
||||
# pip-compile --output-file=requirements-dev.txt requirements-dev.in --upgrade
|
||||
#
|
||||
pytest
|
||||
pytest-env
|
||||
pip-tools
|
||||
@@ -14,3 +10,4 @@ pre-commit
|
||||
cogapp
|
||||
semver
|
||||
codespell
|
||||
uv
|
||||
|
||||
@@ -1,233 +1,219 @@
|
||||
#
|
||||
# This file is autogenerated by pip-compile with Python 3.12
|
||||
# by the following command:
|
||||
#
|
||||
# pip-compile --allow-unsafe --constraint=requirements.txt --output-file=requirements/requirements-dev.txt requirements/requirements-dev.in
|
||||
#
|
||||
alabaster==1.0.0
|
||||
# via sphinx
|
||||
babel==2.17.0
|
||||
# via sphinx
|
||||
# This file was autogenerated by uv via the following command:
|
||||
# uv pip compile --no-strip-extras --constraint=requirements/common-constraints.txt --output-file=requirements/requirements-dev.txt requirements/requirements-dev.in
|
||||
build==1.2.2.post1
|
||||
# via pip-tools
|
||||
certifi==2025.1.31
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# requests
|
||||
# -c requirements/common-constraints.txt
|
||||
# pip-tools
|
||||
cfgv==3.4.0
|
||||
# via pre-commit
|
||||
charset-normalizer==3.4.1
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# requests
|
||||
# -c requirements/common-constraints.txt
|
||||
# pre-commit
|
||||
click==8.1.8
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# pip-tools
|
||||
# typer
|
||||
codespell==2.4.1
|
||||
# via -r requirements/requirements-dev.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
cogapp==3.4.1
|
||||
# via -r requirements/requirements-dev.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
contourpy==1.3.1
|
||||
# via matplotlib
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# matplotlib
|
||||
cycler==0.12.1
|
||||
# via matplotlib
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# matplotlib
|
||||
dill==0.3.9
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# multiprocess
|
||||
# pathos
|
||||
distlib==0.3.9
|
||||
# via virtualenv
|
||||
docutils==0.21.2
|
||||
# via
|
||||
# sphinx
|
||||
# sphinx-rtd-theme
|
||||
# -c requirements/common-constraints.txt
|
||||
# virtualenv
|
||||
filelock==3.17.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# virtualenv
|
||||
fonttools==4.56.0
|
||||
# via matplotlib
|
||||
identify==2.6.6
|
||||
# via pre-commit
|
||||
idna==3.10
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# requests
|
||||
imagesize==1.4.1
|
||||
# via sphinx
|
||||
# -c requirements/common-constraints.txt
|
||||
# matplotlib
|
||||
identify==2.6.9
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pre-commit
|
||||
imgcat==0.6.0
|
||||
# via -r requirements/requirements-dev.in
|
||||
iniconfig==2.0.0
|
||||
# via pytest
|
||||
jinja2==3.1.5
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# sphinx
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
iniconfig==2.0.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pytest
|
||||
kiwisolver==1.4.8
|
||||
# via matplotlib
|
||||
lox==0.12.0
|
||||
# via -r requirements/requirements-dev.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# matplotlib
|
||||
lox==0.13.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
markdown-it-py==3.0.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# rich
|
||||
markupsafe==3.0.2
|
||||
matplotlib==3.10.1
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# jinja2
|
||||
matplotlib==3.10.0
|
||||
# via -r requirements/requirements-dev.in
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
mdurl==0.1.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# markdown-it-py
|
||||
multiprocess==0.70.17
|
||||
# via pathos
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pathos
|
||||
nodeenv==1.9.1
|
||||
# via pre-commit
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pre-commit
|
||||
numpy==1.26.4
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# contourpy
|
||||
# matplotlib
|
||||
# pandas
|
||||
packaging==24.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# build
|
||||
# matplotlib
|
||||
# pytest
|
||||
# sphinx
|
||||
pandas==2.2.3
|
||||
# via -r requirements/requirements-dev.in
|
||||
pathos==0.3.3
|
||||
# via lox
|
||||
pillow==10.4.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
pathos==0.3.3
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# lox
|
||||
pillow==11.1.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# matplotlib
|
||||
pip==25.0.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pip-tools
|
||||
pip-tools==7.4.1
|
||||
# via -r requirements/requirements-dev.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
platformdirs==4.3.6
|
||||
# via virtualenv
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# virtualenv
|
||||
pluggy==1.5.0
|
||||
# via pytest
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pytest
|
||||
pox==0.3.5
|
||||
# via pathos
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pathos
|
||||
ppft==1.7.6.9
|
||||
# via pathos
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pathos
|
||||
pre-commit==4.1.0
|
||||
# via -r requirements/requirements-dev.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
pygments==2.19.1
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# rich
|
||||
# sphinx
|
||||
pyparsing==3.2.1
|
||||
# via matplotlib
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# matplotlib
|
||||
pyproject-hooks==1.2.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# build
|
||||
# pip-tools
|
||||
pytest==8.3.4
|
||||
pytest==8.3.5
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
# pytest-env
|
||||
pytest-env==1.1.5
|
||||
# via -r requirements/requirements-dev.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
python-dateutil==2.9.0.post0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# matplotlib
|
||||
# pandas
|
||||
pytz==2025.1
|
||||
# via pandas
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pandas
|
||||
pyyaml==6.0.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# pre-commit
|
||||
requests==2.32.3
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# sphinx
|
||||
rich==13.9.4
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# typer
|
||||
semver==3.0.4
|
||||
# via -r requirements/requirements-dev.in
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
setuptools==76.0.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pip-tools
|
||||
shellingham==1.5.4
|
||||
# via typer
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# typer
|
||||
six==1.17.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# python-dateutil
|
||||
snowballstemmer==2.2.0
|
||||
# via sphinx
|
||||
sphinx==8.1.3
|
||||
typer==0.15.2
|
||||
# via
|
||||
# sphinx-rtd-theme
|
||||
# sphinxcontrib-jquery
|
||||
sphinx-rtd-theme==3.0.2
|
||||
# via lox
|
||||
sphinxcontrib-applehelp==2.0.0
|
||||
# via sphinx
|
||||
sphinxcontrib-devhelp==2.0.0
|
||||
# via sphinx
|
||||
sphinxcontrib-htmlhelp==2.1.0
|
||||
# via sphinx
|
||||
sphinxcontrib-jquery==4.1
|
||||
# via sphinx-rtd-theme
|
||||
sphinxcontrib-jsmath==1.0.1
|
||||
# via sphinx
|
||||
sphinxcontrib-qthelp==2.0.0
|
||||
# via sphinx
|
||||
sphinxcontrib-serializinghtml==2.0.0
|
||||
# via sphinx
|
||||
typer==0.15.1
|
||||
# via -r requirements/requirements-dev.in
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
typing-extensions==4.12.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# typer
|
||||
tzdata==2025.1
|
||||
# via pandas
|
||||
urllib3==2.3.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# requests
|
||||
virtualenv==20.29.1
|
||||
# via pre-commit
|
||||
# -c requirements/common-constraints.txt
|
||||
# pandas
|
||||
uv==0.6.5
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
virtualenv==20.29.3
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pre-commit
|
||||
wheel==0.45.1
|
||||
# via pip-tools
|
||||
|
||||
# The following packages are considered to be unsafe in a requirements file:
|
||||
pip==25.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# pip-tools
|
||||
setuptools==75.8.0
|
||||
# via pip-tools
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
-c ../requirements.txt
|
||||
#
|
||||
# pip-compile --output-file=requirements-hf.txt requirements-hf.in --upgrade
|
||||
#
|
||||
|
||||
llama-index-core
|
||||
llama-index-embeddings-huggingface
|
||||
|
||||
# requirement-help and requirements-playwright choose different versions
|
||||
greenlet==3.0.3
|
||||
# Because sentence-transformers doesn't like >=2
|
||||
numpy<2
|
||||
|
||||
# Mac x86 only supports 2.2.2
|
||||
# https://discuss.pytorch.org/t/why-no-macosx-x86-64-build-after-torch-2-2-2-cp39-none-macosx-10-9-x86-64-whl/204546/2
|
||||
torch==2.2.2
|
||||
@@ -1,288 +1,275 @@
|
||||
#
|
||||
# This file is autogenerated by pip-compile with Python 3.12
|
||||
# by the following command:
|
||||
#
|
||||
# pip-compile --allow-unsafe --constraint=requirements.txt --constraint=requirements/requirements-dev.txt --output-file=requirements/requirements-help.txt requirements/requirements-help.in
|
||||
#
|
||||
aiohappyeyeballs==2.4.6
|
||||
# This file was autogenerated by uv via the following command:
|
||||
# uv pip compile --no-strip-extras --constraint=requirements/common-constraints.txt --output-file=requirements/requirements-help.txt requirements/requirements-help.in
|
||||
aiohappyeyeballs==2.5.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
aiohttp==3.11.12
|
||||
aiohttp==3.11.13
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# huggingface-hub
|
||||
# llama-index-core
|
||||
aiosignal==1.3.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
annotated-types==0.7.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# pydantic
|
||||
anyio==4.8.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# httpx
|
||||
attrs==25.1.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
certifi==2025.1.31
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# httpcore
|
||||
# httpx
|
||||
# requests
|
||||
charset-normalizer==3.4.1
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# requests
|
||||
click==8.1.8
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# nltk
|
||||
dataclasses-json==0.6.7
|
||||
# via llama-index-core
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
deprecated==1.2.18
|
||||
# via llama-index-core
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
dirtyjson==1.0.8
|
||||
# via llama-index-core
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
filelock==3.17.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# huggingface-hub
|
||||
# torch
|
||||
# transformers
|
||||
filetype==1.2.0
|
||||
# via llama-index-core
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
frozenlist==1.5.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
# aiosignal
|
||||
fsspec==2025.2.0
|
||||
fsspec==2025.3.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# huggingface-hub
|
||||
# llama-index-core
|
||||
# torch
|
||||
greenlet==3.0.3
|
||||
greenlet==3.1.1
|
||||
# via
|
||||
# -r requirements/requirements-help.in
|
||||
# -c requirements/common-constraints.txt
|
||||
# sqlalchemy
|
||||
h11==0.14.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# httpcore
|
||||
httpcore==1.0.7
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# httpx
|
||||
httpx==0.28.1
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
huggingface-hub[inference]==0.28.1
|
||||
huggingface-hub[inference]==0.29.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-embeddings-huggingface
|
||||
# sentence-transformers
|
||||
# tokenizers
|
||||
# transformers
|
||||
idna==3.10
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# anyio
|
||||
# httpx
|
||||
# requests
|
||||
# yarl
|
||||
jinja2==3.1.5
|
||||
jinja2==3.1.6
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# torch
|
||||
joblib==1.4.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# nltk
|
||||
# scikit-learn
|
||||
llama-index-core==0.12.16.post1
|
||||
llama-index-core==0.12.23.post2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-help.in
|
||||
# llama-index-embeddings-huggingface
|
||||
llama-index-embeddings-huggingface==0.5.1
|
||||
# via -r requirements/requirements-help.in
|
||||
llama-index-embeddings-huggingface==0.5.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-help.in
|
||||
markupsafe==3.0.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# jinja2
|
||||
marshmallow==3.26.1
|
||||
# via dataclasses-json
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# dataclasses-json
|
||||
mpmath==1.3.0
|
||||
# via sympy
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# sympy
|
||||
multidict==6.1.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
# yarl
|
||||
mypy-extensions==1.0.0
|
||||
# via typing-inspect
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# typing-inspect
|
||||
nest-asyncio==1.6.0
|
||||
# via llama-index-core
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
networkx==3.2.1
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
# torch
|
||||
nltk==3.9.1
|
||||
# via llama-index-core
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
numpy==1.26.4
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-help.in
|
||||
# llama-index-core
|
||||
# scikit-learn
|
||||
# scipy
|
||||
# transformers
|
||||
packaging==24.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# huggingface-hub
|
||||
# marshmallow
|
||||
# transformers
|
||||
pillow==10.4.0
|
||||
pillow==11.1.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
# sentence-transformers
|
||||
propcache==0.2.1
|
||||
propcache==0.3.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
# yarl
|
||||
pydantic==2.10.6
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
pydantic-core==2.27.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# pydantic
|
||||
pyyaml==6.0.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# huggingface-hub
|
||||
# llama-index-core
|
||||
# transformers
|
||||
regex==2024.11.6
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# nltk
|
||||
# tiktoken
|
||||
# transformers
|
||||
requests==2.32.3
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# huggingface-hub
|
||||
# llama-index-core
|
||||
# tiktoken
|
||||
# transformers
|
||||
safetensors==0.5.2
|
||||
# via transformers
|
||||
safetensors==0.5.3
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# transformers
|
||||
scikit-learn==1.6.1
|
||||
# via sentence-transformers
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# sentence-transformers
|
||||
scipy==1.13.1
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# scikit-learn
|
||||
# sentence-transformers
|
||||
sentence-transformers==3.4.1
|
||||
# via llama-index-embeddings-huggingface
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-embeddings-huggingface
|
||||
sniffio==1.3.1
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# anyio
|
||||
sqlalchemy[asyncio]==2.0.38
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
# sqlalchemy
|
||||
sympy==1.13.3
|
||||
# via torch
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# torch
|
||||
tenacity==9.0.0
|
||||
# via llama-index-core
|
||||
threadpoolctl==3.5.0
|
||||
# via scikit-learn
|
||||
tiktoken==0.8.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
tokenizers==0.19.1
|
||||
threadpoolctl==3.5.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# scikit-learn
|
||||
tiktoken==0.9.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
tokenizers==0.21.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# transformers
|
||||
torch==2.2.2
|
||||
# via sentence-transformers
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-help.in
|
||||
# sentence-transformers
|
||||
tqdm==4.67.1
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# huggingface-hub
|
||||
# llama-index-core
|
||||
# nltk
|
||||
# sentence-transformers
|
||||
# transformers
|
||||
transformers==4.44.2
|
||||
# via sentence-transformers
|
||||
transformers==4.49.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# sentence-transformers
|
||||
typing-extensions==4.12.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# anyio
|
||||
# huggingface-hub
|
||||
# llama-index-core
|
||||
@@ -293,20 +280,19 @@ typing-extensions==4.12.2
|
||||
# typing-inspect
|
||||
typing-inspect==0.9.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# dataclasses-json
|
||||
# llama-index-core
|
||||
urllib3==2.3.0
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# requests
|
||||
wrapt==1.17.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# deprecated
|
||||
# llama-index-core
|
||||
yarl==1.18.3
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
|
||||
@@ -1,6 +1 @@
|
||||
-c ../requirements.txt
|
||||
|
||||
playwright
|
||||
|
||||
# requirement-help and requirements-playwright choose different versions
|
||||
greenlet==3.0.3
|
||||
|
||||
@@ -1,23 +1,18 @@
|
||||
#
|
||||
# This file is autogenerated by pip-compile with Python 3.12
|
||||
# by the following command:
|
||||
#
|
||||
# pip-compile --allow-unsafe --constraint=requirements.txt --constraint=requirements/requirements-browser.txt --constraint=requirements/requirements-dev.txt --constraint=requirements/requirements-help.txt --output-file=requirements/requirements-playwright.txt requirements/requirements-playwright.in
|
||||
#
|
||||
greenlet==3.0.3
|
||||
# This file was autogenerated by uv via the following command:
|
||||
# uv pip compile --no-strip-extras --constraint=requirements/common-constraints.txt --output-file=requirements/requirements-playwright.txt requirements/requirements-playwright.in
|
||||
greenlet==3.1.1
|
||||
# via
|
||||
# -c requirements/requirements-help.txt
|
||||
# -r requirements/requirements-playwright.in
|
||||
# -c requirements/common-constraints.txt
|
||||
# playwright
|
||||
playwright==1.50.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-playwright.in
|
||||
pyee==12.1.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# playwright
|
||||
playwright==1.47.0
|
||||
# via -r requirements/requirements-playwright.in
|
||||
pyee==12.0.0
|
||||
# via playwright
|
||||
typing-extensions==4.12.2
|
||||
# via
|
||||
# -c /Users/gauthier/Projects/aider/requirements.txt
|
||||
# -c requirements.txt
|
||||
# -c requirements/requirements-browser.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# -c requirements/common-constraints.txt
|
||||
# pyee
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
#
|
||||
# pip-compile requirements.in --upgrade
|
||||
#
|
||||
|
||||
pydub
|
||||
configargparse
|
||||
GitPython
|
||||
@@ -29,7 +25,9 @@ pexpect
|
||||
json5
|
||||
psutil
|
||||
watchfiles
|
||||
socksio
|
||||
pip
|
||||
pillow
|
||||
|
||||
# The proper dependency is networkx[default], but this brings
|
||||
# in matplotlib and a bunch of other deps
|
||||
@@ -44,20 +42,7 @@ networkx<3.3
|
||||
# Pin below 1.14 to retain python 3.9 compatibility.
|
||||
scipy<1.14
|
||||
|
||||
# v0.22.2 seems to break tree-sitter-languages?
|
||||
tree-sitter==0.21.3
|
||||
|
||||
# GitHub Release action failing on "KeyError: 'home-page'"
|
||||
# https://github.com/pypa/twine/blob/6fbf880ee60915cf1666348c4bdd78a10415f2ac/twine/__init__.py#L40
|
||||
# Uses importlib-metadata
|
||||
importlib-metadata<8.0.0
|
||||
|
||||
# Because sentence-transformers doesn't like >=2
|
||||
numpy<2
|
||||
|
||||
# Going past this makes dependencies unresolvable
|
||||
# Seems to be caused by sentence-transformers
|
||||
tokenizers==0.19.1
|
||||
|
||||
# streamlit 1.39.0 depends on this, as far back as 1.22 which is ancient and doesn't have chat ui
|
||||
Pillow<11
|
||||
|
||||
3
requirements/tree-sitter.in
Normal file
3
requirements/tree-sitter.in
Normal file
@@ -0,0 +1,3 @@
|
||||
|
||||
tree-sitter==0.23.2; python_version < "3.10"
|
||||
tree-sitter==0.24.0; python_version >= "3.10"
|
||||
@@ -38,6 +38,7 @@ def blame(start_tag, end_tag=None):
|
||||
for f in files
|
||||
if f.endswith((".js", ".py", ".scm", ".sh", "Dockerfile", "Gemfile"))
|
||||
or (f.startswith(".github/workflows/") and f.endswith(".yml"))
|
||||
or (f.startswith("aider/resources/") and f.endswith(".yml"))
|
||||
or f in website_files
|
||||
or f in test_files
|
||||
]
|
||||
|
||||
@@ -126,6 +126,11 @@ def find_oldest_issue(subject, all_issues):
|
||||
|
||||
|
||||
def comment_and_close_duplicate(issue, oldest_issue):
|
||||
# Skip if issue is labeled as priority
|
||||
if "priority" in [label["name"] for label in issue["labels"]]:
|
||||
print(f" - Skipping priority issue #{issue['number']}")
|
||||
return
|
||||
|
||||
comment_url = (
|
||||
f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}/comments"
|
||||
)
|
||||
@@ -168,7 +173,11 @@ def find_unlabeled_with_paul_comments(issues):
|
||||
|
||||
def handle_unlabeled_issues(all_issues, auto_yes):
|
||||
print("\nFinding unlabeled issues with paul-gauthier comments...")
|
||||
unlabeled_issues = find_unlabeled_with_paul_comments(all_issues)
|
||||
unlabeled_issues = [
|
||||
issue
|
||||
for issue in find_unlabeled_with_paul_comments(all_issues)
|
||||
if "priority" not in [label["name"] for label in issue["labels"]]
|
||||
]
|
||||
|
||||
if not unlabeled_issues:
|
||||
print("No unlabeled issues with paul-gauthier comments found.")
|
||||
@@ -197,10 +206,12 @@ def handle_stale_issues(all_issues, auto_yes):
|
||||
|
||||
for issue in all_issues:
|
||||
# Skip if not open, not a question, already stale, or has been reopened
|
||||
labels = [label["name"] for label in issue["labels"]]
|
||||
if (
|
||||
issue["state"] != "open"
|
||||
or "question" not in [label["name"] for label in issue["labels"]]
|
||||
or "stale" in [label["name"] for label in issue["labels"]]
|
||||
or "question" not in labels
|
||||
or "stale" in labels
|
||||
or "priority" in labels
|
||||
or has_been_reopened(issue["number"])
|
||||
):
|
||||
continue
|
||||
@@ -239,8 +250,9 @@ def handle_stale_closing(all_issues, auto_yes):
|
||||
print("\nChecking for issues to close or unstale...")
|
||||
|
||||
for issue in all_issues:
|
||||
# Skip if not open or not stale
|
||||
if issue["state"] != "open" or "stale" not in [label["name"] for label in issue["labels"]]:
|
||||
# Skip if not open, not stale, or is priority
|
||||
labels = [label["name"] for label in issue["labels"]]
|
||||
if issue["state"] != "open" or "stale" not in labels or "priority" in labels:
|
||||
continue
|
||||
|
||||
# Get the timeline to find when the stale label was last added
|
||||
@@ -324,9 +336,9 @@ def handle_fixed_issues(all_issues, auto_yes):
|
||||
print("\nChecking for fixed enhancement and bug issues to close...")
|
||||
|
||||
for issue in all_issues:
|
||||
# Skip if not open or doesn't have fixed label
|
||||
# Skip if not open, doesn't have fixed label, or is priority
|
||||
labels = [label["name"] for label in issue["labels"]]
|
||||
if issue["state"] != "open" or "fixed" not in labels:
|
||||
if issue["state"] != "open" or "fixed" not in labels or "priority" in labels:
|
||||
continue
|
||||
|
||||
# Check if it's an enhancement or bug
|
||||
|
||||
@@ -3,25 +3,41 @@
|
||||
# exit when any command fails
|
||||
set -e
|
||||
|
||||
# First compile the base requirements
|
||||
pip-compile \
|
||||
--allow-unsafe \
|
||||
# Add verbosity flag to see more details about dependency resolution
|
||||
VERBOSITY="-v" # Use -v for less detail, -vvv for even more detail
|
||||
|
||||
# First compile the common constraints of the full requirement suite
|
||||
# to make sure that all versions are mutually consistent across files
|
||||
uv pip compile \
|
||||
$VERBOSITY \
|
||||
--no-strip-extras \
|
||||
--output-file=requirements/common-constraints.txt \
|
||||
requirements/requirements.in \
|
||||
--output-file=requirements.txt \
|
||||
requirements/requirements-*.in \
|
||||
$1
|
||||
|
||||
# Then compile each additional requirements file in sequence
|
||||
# Compile the base requirements
|
||||
uv pip compile \
|
||||
$VERBOSITY \
|
||||
--no-strip-extras \
|
||||
--constraint=requirements/common-constraints.txt \
|
||||
--output-file=tmp.requirements.txt \
|
||||
requirements/requirements.in \
|
||||
$1
|
||||
|
||||
grep -v ^tree-sitter= tmp.requirements.txt \
|
||||
| cat - requirements/tree-sitter.in \
|
||||
> requirements.txt
|
||||
|
||||
# Compile additional requirements files
|
||||
SUFFIXES=(dev help browser playwright)
|
||||
CONSTRAINTS="--constraint=requirements.txt"
|
||||
|
||||
for SUFFIX in "${SUFFIXES[@]}"; do
|
||||
pip-compile \
|
||||
--allow-unsafe \
|
||||
requirements/requirements-${SUFFIX}.in \
|
||||
uv pip compile \
|
||||
$VERBOSITY \
|
||||
--no-strip-extras \
|
||||
--constraint=requirements/common-constraints.txt \
|
||||
--output-file=requirements/requirements-${SUFFIX}.txt \
|
||||
${CONSTRAINTS} \
|
||||
requirements/requirements-${SUFFIX}.in \
|
||||
$1
|
||||
|
||||
# Add this file as a constraint for the next iteration
|
||||
CONSTRAINTS+=" --constraint=requirements/requirements-${SUFFIX}.txt"
|
||||
done
|
||||
|
||||
@@ -81,7 +81,7 @@ def main():
|
||||
# Construct and run the aider command
|
||||
message = history_prompt.format(aider_line=aider_line)
|
||||
|
||||
cmd = ["aider", hist_path, "--read", diff_path, "--msg", message, "--no-auto-commit"]
|
||||
cmd = ["aider", hist_path, "--read", diff_path, "--msg", message, "--no-git", "--no-auto-lint"]
|
||||
subprocess.run(cmd)
|
||||
|
||||
# Read back the updated history
|
||||
|
||||
@@ -10,6 +10,73 @@ import sys
|
||||
from packaging import version
|
||||
|
||||
|
||||
# Function to check if we are on the main branch
|
||||
def check_branch():
|
||||
branch = subprocess.run(
|
||||
["git", "rev-parse", "--abbrev-ref", "HEAD"], capture_output=True, text=True
|
||||
).stdout.strip()
|
||||
if branch != "main":
|
||||
print("Error: Not on the main branch.")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# Function to check if the working directory is clean
|
||||
def check_working_directory_clean():
|
||||
status = subprocess.run(["git", "status", "--porcelain"], capture_output=True, text=True).stdout
|
||||
if status:
|
||||
print("Error: Working directory is not clean.")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# Function to fetch the latest changes and check if the main branch is up to date
|
||||
def check_main_branch_up_to_date():
|
||||
subprocess.run(["git", "fetch", "origin"], check=True)
|
||||
local_main = subprocess.run(
|
||||
["git", "rev-parse", "main"], capture_output=True, text=True
|
||||
).stdout.strip()
|
||||
print(f"Local main commit hash: {local_main}")
|
||||
origin_main = subprocess.run(
|
||||
["git", "rev-parse", "origin/main"], capture_output=True, text=True
|
||||
).stdout.strip()
|
||||
print(f"Origin main commit hash: {origin_main}")
|
||||
if local_main != origin_main:
|
||||
local_date = subprocess.run(
|
||||
["git", "show", "-s", "--format=%ci", "main"], capture_output=True, text=True
|
||||
).stdout.strip()
|
||||
origin_date = subprocess.run(
|
||||
["git", "show", "-s", "--format=%ci", "origin/main"], capture_output=True, text=True
|
||||
).stdout.strip()
|
||||
local_date = datetime.datetime.strptime(local_date, "%Y-%m-%d %H:%M:%S %z")
|
||||
origin_date = datetime.datetime.strptime(origin_date, "%Y-%m-%d %H:%M:%S %z")
|
||||
if local_date < origin_date:
|
||||
print(
|
||||
"Error: The local main branch is behind origin/main. Please pull the latest"
|
||||
" changes."
|
||||
)
|
||||
elif local_date > origin_date:
|
||||
print(
|
||||
"Error: The origin/main branch is behind the local main branch. Please push"
|
||||
" your changes."
|
||||
)
|
||||
else:
|
||||
print("Error: The main branch and origin/main have diverged.")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# Function to check if we can push to the origin repository
|
||||
def check_ok_to_push():
|
||||
print("Checking if it's ok to push to origin repository...")
|
||||
result = subprocess.run(["git", "push", "--dry-run", "origin"], capture_output=True, text=True)
|
||||
print(result.stdout)
|
||||
print(result.stderr)
|
||||
|
||||
if result.returncode != 0:
|
||||
print("Error: Cannot push to origin repository.")
|
||||
sys.exit(1)
|
||||
|
||||
print("Push to origin repository is possible.")
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Bump version")
|
||||
parser.add_argument("new_version", help="New version in x.y.z format")
|
||||
@@ -17,58 +84,6 @@ def main():
|
||||
"--dry-run", action="store_true", help="Print each step without actually executing them"
|
||||
)
|
||||
|
||||
# Function to check if we are on the main branch
|
||||
def check_branch():
|
||||
branch = subprocess.run(
|
||||
["git", "rev-parse", "--abbrev-ref", "HEAD"], capture_output=True, text=True
|
||||
).stdout.strip()
|
||||
if branch != "main":
|
||||
print("Error: Not on the main branch.")
|
||||
sys.exit(1)
|
||||
|
||||
# Function to check if the working directory is clean
|
||||
def check_working_directory_clean():
|
||||
status = subprocess.run(
|
||||
["git", "status", "--porcelain"], capture_output=True, text=True
|
||||
).stdout
|
||||
if status:
|
||||
print("Error: Working directory is not clean.")
|
||||
sys.exit(1)
|
||||
|
||||
# Function to fetch the latest changes and check if the main branch is up to date
|
||||
def check_main_branch_up_to_date():
|
||||
subprocess.run(["git", "fetch", "origin"], check=True)
|
||||
local_main = subprocess.run(
|
||||
["git", "rev-parse", "main"], capture_output=True, text=True
|
||||
).stdout.strip()
|
||||
print(f"Local main commit hash: {local_main}")
|
||||
origin_main = subprocess.run(
|
||||
["git", "rev-parse", "origin/main"], capture_output=True, text=True
|
||||
).stdout.strip()
|
||||
print(f"Origin main commit hash: {origin_main}")
|
||||
if local_main != origin_main:
|
||||
local_date = subprocess.run(
|
||||
["git", "show", "-s", "--format=%ci", "main"], capture_output=True, text=True
|
||||
).stdout.strip()
|
||||
origin_date = subprocess.run(
|
||||
["git", "show", "-s", "--format=%ci", "origin/main"], capture_output=True, text=True
|
||||
).stdout.strip()
|
||||
local_date = datetime.datetime.strptime(local_date, "%Y-%m-%d %H:%M:%S %z")
|
||||
origin_date = datetime.datetime.strptime(origin_date, "%Y-%m-%d %H:%M:%S %z")
|
||||
if local_date < origin_date:
|
||||
print(
|
||||
"Error: The local main branch is behind origin/main. Please pull the latest"
|
||||
" changes."
|
||||
)
|
||||
elif local_date > origin_date:
|
||||
print(
|
||||
"Error: The origin/main branch is behind the local main branch. Please push"
|
||||
" your changes."
|
||||
)
|
||||
else:
|
||||
print("Error: The main branch and origin/main have diverged.")
|
||||
sys.exit(1)
|
||||
|
||||
args = parser.parse_args()
|
||||
dry_run = args.dry_run
|
||||
|
||||
@@ -76,6 +91,7 @@ def main():
|
||||
check_branch()
|
||||
check_working_directory_clean()
|
||||
check_main_branch_up_to_date()
|
||||
check_ok_to_push()
|
||||
|
||||
new_version_str = args.new_version
|
||||
if not re.match(r"^\d+\.\d+\.\d+$", new_version_str):
|
||||
@@ -107,7 +123,7 @@ def main():
|
||||
["git", "add", "aider/__init__.py"],
|
||||
["git", "commit", "-m", f"version bump to {new_version}"],
|
||||
["git", "tag", f"v{new_version}"],
|
||||
["git", "push", "origin"],
|
||||
["git", "push", "origin", "--no-verify"],
|
||||
["git", "push", "origin", f"v{new_version}", "--no-verify"],
|
||||
]
|
||||
|
||||
|
||||
140
tests/basic/test_deprecated.py
Normal file
140
tests/basic/test_deprecated.py
Normal file
@@ -0,0 +1,140 @@
|
||||
import os
|
||||
from unittest import TestCase
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from prompt_toolkit.input import DummyInput
|
||||
from prompt_toolkit.output import DummyOutput
|
||||
|
||||
from aider.deprecated import handle_deprecated_model_args
|
||||
from aider.dump import dump # noqa
|
||||
from aider.main import main
|
||||
|
||||
|
||||
class TestDeprecated(TestCase):
|
||||
def setUp(self):
|
||||
self.original_env = os.environ.copy()
|
||||
os.environ["OPENAI_API_KEY"] = "deadbeef"
|
||||
os.environ["AIDER_CHECK_UPDATE"] = "false"
|
||||
os.environ["AIDER_ANALYTICS"] = "false"
|
||||
|
||||
def tearDown(self):
|
||||
os.environ.clear()
|
||||
os.environ.update(self.original_env)
|
||||
|
||||
@patch("aider.io.InputOutput.tool_warning")
|
||||
@patch("aider.io.InputOutput.offer_url")
|
||||
def test_deprecated_args_show_warnings(self, mock_offer_url, mock_tool_warning):
|
||||
# Prevent URL launches during tests
|
||||
mock_offer_url.return_value = False
|
||||
# Test all deprecated flags to ensure they show warnings
|
||||
deprecated_flags = [
|
||||
"--opus",
|
||||
"--sonnet",
|
||||
"--haiku",
|
||||
"--4",
|
||||
"-4",
|
||||
"--4o",
|
||||
"--mini",
|
||||
"--4-turbo",
|
||||
"--35turbo",
|
||||
"--35-turbo",
|
||||
"--3",
|
||||
"-3",
|
||||
"--deepseek",
|
||||
"--o1-mini",
|
||||
"--o1-preview",
|
||||
]
|
||||
|
||||
for flag in deprecated_flags:
|
||||
mock_tool_warning.reset_mock()
|
||||
|
||||
with patch("aider.models.Model"), self.subTest(flag=flag):
|
||||
main(
|
||||
[flag, "--no-git", "--exit", "--yes"], input=DummyInput(), output=DummyOutput()
|
||||
)
|
||||
|
||||
# Look for the deprecation warning in all calls
|
||||
deprecation_warning = None
|
||||
dump(flag)
|
||||
dump(mock_tool_warning.call_args_list)
|
||||
for call_args in mock_tool_warning.call_args_list:
|
||||
dump(call_args)
|
||||
if "deprecated" in call_args[0][0]:
|
||||
deprecation_warning = call_args[0][0]
|
||||
break
|
||||
|
||||
self.assertIsNotNone(
|
||||
deprecation_warning, f"No deprecation warning found for {flag}"
|
||||
)
|
||||
warning_msg = deprecation_warning
|
||||
|
||||
self.assertIn("deprecated", warning_msg)
|
||||
self.assertIn("use --model", warning_msg.lower())
|
||||
|
||||
@patch("aider.io.InputOutput.tool_warning")
|
||||
@patch("aider.io.InputOutput.offer_url")
|
||||
def test_model_alias_in_warning(self, mock_offer_url, mock_tool_warning):
|
||||
# Prevent URL launches during tests
|
||||
mock_offer_url.return_value = False
|
||||
# Test that the warning uses the model alias if available
|
||||
with patch("aider.models.MODEL_ALIASES", {"gpt4": "gpt-4-0613"}):
|
||||
with patch("aider.models.Model"):
|
||||
main(
|
||||
["--4", "--no-git", "--exit", "--yes"], input=DummyInput(), output=DummyOutput()
|
||||
)
|
||||
|
||||
# Look for the deprecation warning in all calls
|
||||
deprecation_warning = None
|
||||
for call_args in mock_tool_warning.call_args_list:
|
||||
if "deprecated" in call_args[0][0] and "--model gpt4" in call_args[0][0]:
|
||||
deprecation_warning = call_args[0][0]
|
||||
break
|
||||
|
||||
self.assertIsNotNone(
|
||||
deprecation_warning, "No deprecation warning with model alias found"
|
||||
)
|
||||
warning_msg = deprecation_warning
|
||||
self.assertIn("--model gpt4", warning_msg)
|
||||
self.assertNotIn("--model gpt-4-0613", warning_msg)
|
||||
|
||||
def test_model_is_set_correctly(self):
|
||||
test_cases = [
|
||||
("opus", "claude-3-opus-20240229"),
|
||||
("sonnet", "anthropic/claude-3-7-sonnet-20250219"),
|
||||
("haiku", "claude-3-5-haiku-20241022"),
|
||||
("4", "gpt-4-0613"),
|
||||
# Testing the dash variant with underscore in attribute name
|
||||
("4o", "gpt-4o"),
|
||||
("mini", "gpt-4o-mini"),
|
||||
("4_turbo", "gpt-4-1106-preview"),
|
||||
("35turbo", "gpt-3.5-turbo"),
|
||||
("deepseek", "deepseek/deepseek-chat"),
|
||||
("o1_mini", "o1-mini"),
|
||||
("o1_preview", "o1-preview"),
|
||||
]
|
||||
|
||||
for flag, expected_model in test_cases:
|
||||
print(flag, expected_model)
|
||||
|
||||
with self.subTest(flag=flag):
|
||||
# Create a mock IO instance
|
||||
mock_io = MagicMock()
|
||||
|
||||
# Create args with ONLY the current flag set to True
|
||||
args = MagicMock()
|
||||
args.model = None
|
||||
|
||||
# Ensure all flags are False by default
|
||||
for test_flag, _ in test_cases:
|
||||
setattr(args, test_flag, False)
|
||||
|
||||
# Set only the current flag to True
|
||||
setattr(args, flag, True)
|
||||
|
||||
dump(args)
|
||||
|
||||
# Call the handle_deprecated_model_args function
|
||||
handle_deprecated_model_args(args, mock_io)
|
||||
|
||||
# Check that args.model was set to the expected model
|
||||
self.assertEqual(args.model, expected_model)
|
||||
@@ -1,8 +1,6 @@
|
||||
import os
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from aider.editor import (
|
||||
DEFAULT_EDITOR_NIX,
|
||||
DEFAULT_EDITOR_OS_X,
|
||||
@@ -21,7 +19,7 @@ def test_get_environment_editor():
|
||||
assert get_environment_editor("default") == "default"
|
||||
|
||||
# Test EDITOR precedence
|
||||
with patch.dict(os.environ, {"EDITOR": "vim"}):
|
||||
with patch.dict(os.environ, {"EDITOR": "vim"}, clear=True):
|
||||
assert get_environment_editor() == "vim"
|
||||
|
||||
# Test VISUAL overrides EDITOR
|
||||
@@ -34,17 +32,17 @@ def test_discover_editor_defaults():
|
||||
# Test Windows default
|
||||
mock_system.return_value = "Windows"
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
assert discover_editor() == [DEFAULT_EDITOR_WINDOWS]
|
||||
assert discover_editor() == DEFAULT_EDITOR_WINDOWS
|
||||
|
||||
# Test macOS default
|
||||
mock_system.return_value = "Darwin"
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
assert discover_editor() == [DEFAULT_EDITOR_OS_X]
|
||||
assert discover_editor() == DEFAULT_EDITOR_OS_X
|
||||
|
||||
# Test Linux default
|
||||
mock_system.return_value = "Linux"
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
assert discover_editor() == [DEFAULT_EDITOR_NIX]
|
||||
assert discover_editor() == DEFAULT_EDITOR_NIX
|
||||
|
||||
|
||||
def test_write_temp_file():
|
||||
@@ -81,12 +79,44 @@ def test_print_status_message(capsys):
|
||||
|
||||
def test_discover_editor_override():
|
||||
# Test editor override
|
||||
assert discover_editor("code") == ["code"]
|
||||
assert discover_editor('vim -c "set noswapfile"') == ["vim", "-c", "set noswapfile"]
|
||||
assert discover_editor("code") == "code"
|
||||
assert discover_editor('vim -c "set noswapfile"') == 'vim -c "set noswapfile"'
|
||||
|
||||
# Test invalid editor command
|
||||
with pytest.raises(RuntimeError):
|
||||
discover_editor('vim "unclosed quote')
|
||||
|
||||
def test_pipe_editor_with_fake_editor():
|
||||
# Create a temporary Python script that logs its arguments
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
with tempfile.NamedTemporaryFile(mode="w", suffix=".log", delete=False) as log_f:
|
||||
log_path = log_f.name
|
||||
# Convert to raw string path to avoid escape issues on Windows
|
||||
log_path_escaped = log_path.replace("\\", "\\\\")
|
||||
|
||||
with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
|
||||
f.write(f"""import sys
|
||||
with open(r"{log_path_escaped}", "w") as f:
|
||||
f.write(" ".join(sys.argv))
|
||||
""")
|
||||
script_path = f.name
|
||||
|
||||
try:
|
||||
# Use the Python script as editor and verify it's called with .md file
|
||||
python_exe = sys.executable
|
||||
editor_cmd = f"{python_exe} {script_path}"
|
||||
pipe_editor("test content", suffix="md", editor=editor_cmd)
|
||||
|
||||
# Read the log file to see what arguments were passed
|
||||
with open(log_path) as f:
|
||||
called_args = f.read().strip()
|
||||
|
||||
# Verify the editor was called with a .md file
|
||||
assert called_args.endswith(".md"), f"Called args: {called_args!r}"
|
||||
|
||||
finally:
|
||||
# Clean up
|
||||
os.unlink(script_path)
|
||||
os.unlink(log_path)
|
||||
|
||||
|
||||
def test_pipe_editor():
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user