mirror of
https://github.com/Aider-AI/aider
synced 2026-04-26 01:25:17 +02:00
Compare commits
205 Commits
v0.61.1.dev
...
v0.63.3.dev
| SHA1 |
|---|
| abf804cf10 |
| d7a195706f |
| 569370109a |
| eae3f04c83 |
| ee0987f331 |
| bdafa842bd |
| 76c7c2562c |
| f7de2234f2 |
| c725c45726 |
| 383bef456d |
| 7f48f3d01d |
| 0bf17a48f7 |
| c127f8f2f0 |
| d4d5d15e18 |
| b254afa498 |
| 0ce5a94c15 |
| 38a5405c65 |
| ecef784686 |
| e82b2c12b4 |
| 3c26ced8db |
| 2681a41abb |
| 805deb1002 |
| 6b792de802 |
| 26f8e34dcb |
| e0c1b2458c |
| 66f94d2141 |
| 503a9a0038 |
| d8a5bc3ae9 |
| 94c3957d92 |
| 266350b8ce |
| 721d852cc7 |
| ffbf205aba |
| e1a1e43c3a |
| c538817b61 |
| 71d85d2771 |
| 69f4d5fca7 |
| bb31fc5301 |
| 87ce51e314 |
| 33555ca2ea |
| 6fc3776c0c |
| 8a05f05bd4 |
| 0dff51920e |
| 262f217d04 |
| 8b9154bab0 |
| 533613d92b |
| 20d87e1136 |
| 1450c4194e |
| bc82baa968 |
| 218623be28 |
| ea1a4ecdc6 |
| 6acbff3c11 |
| 153021efcf |
| fef1b59b42 |
| 733b11b7d4 |
| 400514ff10 |
| bbfac316fc |
| 7d0b67f70e |
| d0f1b38848 |
| 4d4b5bc366 |
| 1f39c7ef0d |
| 2e97fcc47f |
| 550c8322c4 |
| 2467e23033 |
| 0e53198c59 |
| 44063590e2 |
| 557f25bf80 |
| 190531543f |
| acfb7c3a89 |
| 94a6d3bc7e |
| 5751bcd382 |
| 21b88c0e65 |
| f66b916d4b |
| bd9c43a48d |
| 352b91f342 |
| be6e3254ea |
| dd1ee209ab |
| c0b1101a52 |
| 52c49fc8fd |
| 77cb64958e |
| b3d13e44b2 |
| 9dd7b795ca |
| 7a8399571a |
| d0e85d9c2c |
| 14d02bc843 |
| b79c09cf58 |
| c9dfe5daff |
| 092e7f6b3c |
| 203634314c |
| c472e6e160 |
| 86d9275375 |
| 9db6780af6 |
| e10205ff69 |
| 9f539436b8 |
| acd75e1083 |
| f30225db90 |
| 007305962a |
| 8065e38797 |
| 34dc684867 |
| 7edc9603d0 |
| 479d476878 |
| 92bbec1852 |
| d406636878 |
| 6362199363 |
| 22dbcb7590 |
| cefea38ee5 |
| d44cd01493 |
| e578655653 |
| 434dc27557 |
| 79af39bd2c |
| 20d5a9fd4b |
| 80e57ca074 |
| 8a3c95d8dd |
| 4941a360cb |
| 9e7219c4d6 |
| 4d96728709 |
| 816fd5e65c |
| 8d4175536f |
| bba9ca3d5a |
| 8bc9ebf2aa |
| dad335b8b6 |
| 62e93d4002 |
| 728f4a0f81 |
| 0cafd6ee4b |
| 2962e51dac |
| cf5733b237 |
| c96e3326bc |
| 1cd373c0a5 |
| ae970cf2da |
| d43a01f182 |
| 42aac55b82 |
| a66f31dc87 |
| 64c48f2151 |
| 9eead8a904 |
| 7761bccffe |
| a848933875 |
| e475f3d752 |
| 4d24dbc661 |
| 0368c3fae9 |
| 2cf93ccb54 |
| 3d72cafea4 |
| af0466ea83 |
| c84f2996ec |
| add9b83d3b |
| 5411fb6fd4 |
| 49fc1b40e5 |
| daef2eecdd |
| 1520422cc3 |
| c7530085a6 |
| bf43c567d8 |
| b81f3e4f8d |
| 97051b9d40 |
| 90730845de |
| f7c0c433c3 |
| 538752d0cf |
| c71a92ac84 |
| 85f23b3408 |
| 44cab0a4d7 |
| 33db8ee0c3 |
| da4b3770c0 |
| 12698998b9 |
| 6177856baf |
| 54b9c46b96 |
| 2dd83e7dbe |
| 17351e8f91 |
| e8c153f72f |
| ddfd1276c5 |
| 46251c6a1c |
| e699968be5 |
| 389b58b75b |
| a7cf34dea4 |
| e601682706 |
| 55f16dc0b5 |
| 775011033f |
| 8ffe21a2dd |
| 73d63ef5ce |
| e12b1a9184 |
| d099a95b92 |
| 496ed90439 |
| 7883db1834 |
| 0cfc23b1a8 |
| d33104aec1 |
| 711b2a431c |
| 09d21b5951 |
| d9193387cc |
| d5330ae2f3 |
| 571c1b47b5 |
| 0c9d4dd123 |
| 7d79408683 |
| e6d4c3558b |
| 3be2109964 |
| ce37ff26b5 |
| 03bbdb010f |
| 0bde1da42c |
| 987cb3bca9 |
| 0b11024967 |
| dfaaedb466 |
| 8bc0d5544d |
| c67e63bc09 |
| 129f5fae76 |
| 966a613ffe |
| 96ad107c19 |
| ebdc126b00 |
| 5e1be966ed |
| 0022c1a67e |
| 6a0380b8c0 |
.github/workflows/close-stale.yml (vendored, 24 lines deleted)

@@ -1,24 +0,0 @@
-name: 'Close stale issues and PRs'
-on:
-  schedule:
-    - cron: '30 1 * * *'
-  workflow_dispatch:
-
-permissions:
-  issues: write
-
-jobs:
-  stale:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/stale@v9
-        with:
-          stale-issue-message: 'This issue has been labelled stale because it has been open for 2 weeks with no activity. Remove stale label or add a comment to keep this issue open. Otherwise, it will be closed in 7 days.'
-          close-issue-message: 'This issue was closed because it has been stalled for 3 weeks with no activity. Feel free to add a comment here and we can re-open it. Or feel free to file a new issue any time.'
-          days-before-stale: 14
-          days-before-close: 7
-          stale-issue-label: 'stale'
-          stale-pr-label: 'stale'
-          only-labels: 'question'
-          days-before-pr-stale: -1
-          days-before-pr-close: -1
CONTRIBUTING.md

@@ -187,8 +187,8 @@ pytest
 You can also run specific test files or test cases by providing the file path or test name:
 
 ```
-pytest aider/tests/test_coder.py
-pytest aider/tests/test_coder.py::TestCoder::test_specific_case
+pytest tests/basic/test_coder.py
+pytest tests/basic/test_coder.py::TestCoder::test_specific_case
 ```
 
 #### Continuous Integration
HISTORY.md (40 changes)

@@ -3,6 +3,46 @@
 
 ### main branch
 
+- Fixed bug in fuzzy model name matching when litellm provider info is missing.
+- Modified model metadata file loading to allow override of resource file.
+- Allow recursive loading of dirs using `--read`.
+- Updated dependency versions to pick up litellm fix for ollama models.
+- Added exponential backoff retry when writing files to handle editor file locks.
+- Updated Qwen 2.5 Coder 32B model configuration.
+
+### Aider v0.63.1
+
+- Fixed bug in git ignored file handling.
+- Improved error handling for git operations.
+
+### Aider v0.63.0
+
+- Support for Qwen 2.5 Coder 32B.
+- `/web` command just adds the page to the chat, without triggering an LLM response.
+- Improved prompting for the user's preferred chat language.
+- Improved handling of LiteLLM exceptions.
+- Bugfix for double-counting tokens when reporting cache stats.
+- Bugfix for the LLM creating new files.
+- Other small bug fixes.
+- Aider wrote 55% of the code in this release.
+
+### Aider v0.62.0
+
+- Full support for Claude 3.5 Haiku
+  - Scored 75% on [aider's code editing leaderboard](https://aider.chat/docs/leaderboards/).
+  - Almost as good as Sonnet at much lower cost.
+  - Launch with `--haiku` to use it.
+- Easily apply file edits from ChatGPT, Claude or other web apps
+  - Chat with ChatGPT or Claude via their web app.
+  - Give it your source files and ask for the changes you want.
+  - Use the web app's "copy response" button to copy the entire reply from the LLM.
+  - Run `aider --apply-clipboard-edits file-to-edit.js`.
+  - Aider will edit your file with the LLM's changes.
+- Bugfix for creating new files.
+- Aider wrote 84% of the code in this release.
+
 ### Aider v0.61.0
 
 - Load and save aider slash-commands to files:
   - `/save <fname>` command will make a file of `/add` and `/read-only` commands that recreate the current file context in the chat.
   - `/load <fname>` will replay the commands in the file.
aider/__init__.py

@@ -1,6 +1,6 @@
 try:
     from aider.__version__ import __version__
 except Exception:
-    __version__ = "0.61.1.dev"
+    __version__ = "0.63.3.dev"
 
 __all__ = [__version__]
aider/args.py

@@ -66,6 +66,14 @@ def get_parser(default_config_files, git_root):
         const=sonnet_model,
         help=f"Use {sonnet_model} model for the main chat",
     )
+    haiku_model = "claude-3-5-haiku-20241022"
+    group.add_argument(
+        "--haiku",
+        action="store_const",
+        dest="model",
+        const=haiku_model,
+        help=f"Use {haiku_model} model for the main chat",
+    )
    gpt_4_model = "gpt-4-0613"
     group.add_argument(
         "--4",
@@ -630,6 +638,12 @@ def get_parser(default_config_files, git_root):
         metavar="FILE",
         help="Apply the changes from the given file instead of running the chat (debug)",
     )
+    group.add_argument(
+        "--apply-clipboard-edits",
+        action="store_true",
+        help="Apply clipboard contents as edits using the main model's editor format",
+        default=False,
+    )
     group.add_argument(
         "--yes-always",
         action="store_true",
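The two flags above rely on standard argparse actions: `--haiku` uses `store_const` to write a model id into `args.model`, while `--apply-clipboard-edits` is a plain boolean. A minimal, self-contained sketch of the `store_const` pattern, with the flag name and model id copied from the hunk above:

```python
# Sketch: store_const writes the model id into args.model only when the
# flag is present; several mutually exclusive flags can share dest="model".
import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group("Main model")

haiku_model = "claude-3-5-haiku-20241022"
group.add_argument(
    "--haiku",
    action="store_const",
    dest="model",
    const=haiku_model,
    help=f"Use {haiku_model} model for the main chat",
)

args = parser.parse_args(["--haiku"])
print(args.model)  # claude-3-5-haiku-20241022
```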
aider/coders/architect_coder.py

@@ -10,6 +10,9 @@ class ArchitectCoder(AskCoder):
     def reply_completed(self):
         content = self.partial_response_content
 
+        if not content or not content.strip():
+            return
+
         if not self.io.confirm_ask("Edit the files?"):
             return
aider/coders/architect_prompts.py

@@ -13,7 +13,7 @@ Just show the changes needed.
 
 DO NOT show the entire updated function/file/etc!
 
-Always reply in the same language as the change request.
+Always reply to the user in {language}.
 """
 
     example_messages = []
aider/coders/ask_prompts.py

@@ -6,7 +6,7 @@ from .base_prompts import CoderPrompts
 class AskPrompts(CoderPrompts):
     main_system = """Act as an expert code analyst.
 Answer questions about the supplied code.
-Always reply to the user in the same language they are using.
+Always reply to the user in {language}.
 """
 
     example_messages = []
aider/coders/base_coder.py

@@ -13,7 +13,6 @@ import sys
 import threading
 import time
 import traceback
-import webbrowser
 from collections import defaultdict
 from datetime import datetime
 from json.decoder import JSONDecodeError
@@ -23,6 +22,7 @@ from typing import List
 from aider import __version__, models, prompts, urls, utils
 from aider.analytics import Analytics
 from aider.commands import Commands
+from aider.exceptions import LiteLLMExceptions
 from aider.history import ChatSummary
 from aider.io import ConfirmGroup, InputOutput
 from aider.linter import Linter
@@ -30,7 +30,7 @@ from aider.llm import litellm
 from aider.repo import ANY_GIT_ERROR, GitRepo
 from aider.repomap import RepoMap
 from aider.run_cmd import run_cmd
-from aider.sendchat import RETRY_TIMEOUT, retry_exceptions, send_completion
+from aider.sendchat import RETRY_TIMEOUT, send_completion
 from aider.utils import format_content, format_messages, format_tokens, is_image_file
 
 from ..dump import dump  # noqa: F401
@@ -355,6 +355,9 @@ class Coder:
 
         for fname in fnames:
             fname = Path(fname)
+            if self.repo and self.repo.git_ignored_file(fname):
+                self.io.tool_warning(f"Skipping {fname} that matches gitignore spec.")
+
             if self.repo and self.repo.ignored_file(fname):
                 self.io.tool_warning(f"Skipping {fname} that matches aiderignore spec.")
                 continue
@@ -790,34 +793,9 @@ class Coder:
             self.num_reflections += 1
             message = self.reflected_message
 
-    def check_and_open_urls(self, exc: Exception) -> List[str]:
-        import openai
-
+    def check_and_open_urls(self, exc, friendly_msg=None):
+        """Check exception for URLs, offer to open in a browser, with user-friendly error msgs."""
         text = str(exc)
-        friendly_msg = None
-
-        if isinstance(exc, (openai.APITimeoutError, openai.APIConnectionError)):
-            friendly_msg = (
-                "There is a problem connecting to the API provider. Please try again later or check"
-                " your model settings."
-            )
-        elif isinstance(exc, openai.RateLimitError):
-            friendly_msg = (
-                "The API provider's rate limits have been exceeded. Check with your provider or"
-                " wait awhile and retry."
-            )
-        elif isinstance(exc, openai.InternalServerError):
-            friendly_msg = (
-                "The API provider seems to be down or overloaded. Please try again later."
-            )
-        elif isinstance(exc, openai.BadRequestError):
-            friendly_msg = "The API provider refused the request as invalid?"
-        elif isinstance(exc, openai.AuthenticationError):
-            friendly_msg = (
-                "The API provider refused your authentication. Please check that you are using a"
-                " valid API key."
-            )
 
         if friendly_msg:
             self.io.tool_warning(text)
@@ -829,8 +807,7 @@ class Coder:
         urls = list(set(url_pattern.findall(text)))  # Use set to remove duplicates
         for url in urls:
             url = url.rstrip(".',\"")
-            if self.io.confirm_ask("Open URL for more info about this error?", subject=url):
-                webbrowser.open(url)
+            self.io.offer_url(url)
         return urls
 
     def check_for_urls(self, inp: str) -> List[str]:
@@ -846,7 +823,7 @@ class Coder:
                 "Add URL to the chat?", subject=url, group=group, allow_never=True
             ):
                 inp += "\n\n"
-                inp += self.commands.cmd_web(url)
+                inp += self.commands.cmd_web(url, return_content=True)
                 added_urls.append(url)
             else:
                 self.rejected_urls.add(url)
@@ -982,12 +959,18 @@ class Coder:
             platform=platform_text
         )
 
+        if self.chat_language:
+            language = self.chat_language
+        else:
+            language = "in the same language they are using"
+
         prompt = prompt.format(
             fence=self.fence,
             lazy_prompt=lazy_prompt,
             platform=platform_text,
             shell_cmd_prompt=shell_cmd_prompt,
             shell_cmd_reminder=shell_cmd_reminder,
+            language=language,
         )
         return prompt
@@ -1154,8 +1137,6 @@ class Coder:
         return chunks
 
     def send_message(self, inp):
-        import openai  # for error codes below
-
         self.cur_messages += [
             dict(role="user", content=inp),
         ]
@@ -1175,6 +1156,8 @@ class Coder:
 
         retry_delay = 0.125
 
+        litellm_ex = LiteLLMExceptions()
+
         self.usage_report = None
         exhausted = False
         interrupted = False
@@ -1183,30 +1166,37 @@ class Coder:
             try:
                 yield from self.send(messages, functions=self.functions)
                 break
-            except retry_exceptions() as err:
-                # Print the error and its base classes
-                # for cls in err.__class__.__mro__: dump(cls.__name__)
+            except litellm_ex.exceptions_tuple() as err:
+                ex_info = litellm_ex.get_ex_info(err)
 
-                retry_delay *= 2
-                if retry_delay > RETRY_TIMEOUT:
-                    self.mdstream = None
-                    self.check_and_open_urls(err)
+                if ex_info.name == "ContextWindowExceededError":
+                    exhausted = True
                     break
 
+                should_retry = ex_info.retry
+                if should_retry:
+                    retry_delay *= 2
+                    if retry_delay > RETRY_TIMEOUT:
+                        should_retry = False
+
+                if not should_retry:
+                    self.mdstream = None
+                    self.check_and_open_urls(err, ex_info.description)
+                    break
+
                 err_msg = str(err)
-                self.io.tool_error(err_msg)
+                if ex_info.description:
+                    self.io.tool_warning(err_msg)
+                    self.io.tool_error(ex_info.description)
+                else:
+                    self.io.tool_error(err_msg)
 
                 self.io.tool_output(f"Retrying in {retry_delay:.1f} seconds...")
                 time.sleep(retry_delay)
                 continue
             except KeyboardInterrupt:
                 interrupted = True
                 break
-            except litellm.ContextWindowExceededError:
-                # The input is overflowing the context window!
-                exhausted = True
-                break
-            except litellm.exceptions.BadRequestError as br_err:
-                self.io.tool_error(f"BadRequestError: {br_err}")
-                return
             except FinishReasonLength:
                 # We hit the output limit!
                 if not self.main_model.info.get("supports_assistant_prefill"):
@@ -1221,12 +1211,8 @@ class Coder:
                     messages.append(
                         dict(role="assistant", content=self.multi_response_content, prefix=True)
                     )
-            except (openai.APIError, openai.APIStatusError) as err:
-                # for cls in err.__class__.__mro__: dump(cls.__name__)
-                self.mdstream = None
-                self.check_and_open_urls(err)
-                break
             except Exception as err:
                 self.mdstream = None
                 lines = traceback.format_exception(type(err), err, err.__traceback__)
                 self.io.tool_warning("".join(lines))
                 self.io.tool_error(str(err))
@@ -1374,11 +1360,9 @@ class Coder:
             res.append("- Use /clear to clear the chat history.")
             res.append("- Break your code into smaller source files.")
 
-        res.append("")
-        res.append(f"For more info: {urls.token_limits}")
-
         res = "".join([line + "\n" for line in res])
         self.io.tool_error(res)
+        self.io.offer_url(urls.token_limits)
 
     def lint_edited(self, fnames):
         res = ""
@@ -1622,7 +1606,6 @@ class Coder:
             completion.usage, "cache_creation_input_tokens"
         ):
-            self.message_tokens_sent += prompt_tokens
             self.message_tokens_sent += cache_hit_tokens
             self.message_tokens_sent += cache_write_tokens
         else:
             self.message_tokens_sent += prompt_tokens
@@ -1799,6 +1782,10 @@ class Coder:
             self.check_for_dirty_commit(path)
             return True
 
+        if self.repo and self.repo.git_ignored_file(path):
+            self.io.tool_warning(f"Skipping edits to {path} that matches gitignore spec.")
+            return
+
         if not Path(full_path).exists():
             if not self.io.confirm_ask("Create new file?", subject=path):
                 self.io.tool_output(f"Skipping edits to {path}")
aider/coders/editblock_coder.py

@@ -46,9 +46,16 @@ class EditBlockCoder(Coder):
         for edit in edits:
             path, original, updated = edit
             full_path = self.abs_root_path(path)
-            content = self.io.read_text(full_path)
-            new_content = do_replace(full_path, content, original, updated, self.fence)
-            if not new_content:
+            new_content = None
+
+            if Path(full_path).exists():
+                content = self.io.read_text(full_path)
+                new_content = do_replace(full_path, content, original, updated, self.fence)
+
+            # If the edit failed, and
+            # this is not a "create a new file" with an empty original...
+            # https://github.com/Aider-AI/aider/issues/2258
+            if not new_content and original.strip():
                 # try patching any of the other files in the chat
                 for full_path in self.abs_fnames:
                     content = self.io.read_text(full_path)
aider/coders/editblock_prompts.py

@@ -11,7 +11,7 @@ Respect and use existing conventions, libraries, etc that are already present in
 Take requests for changes to the supplied code.
 If the request is ambiguous, ask questions.
 
-Always reply to the user in the same language they are using.
+Always reply to the user in {language}.
 
 Once you understand the request you MUST:
aider/coders/udiff_prompts.py

@@ -12,7 +12,7 @@ Respect and use existing conventions, libraries, etc that are already present in
 Take requests for changes to the supplied code.
 If the request is ambiguous, ask questions.
 
-Always reply to the user in the same language they are using.
+Always reply to the user in {language}.
 
 For each file that needs to be changed, write out the changes similar to a unified diff like `diff -U0` would produce.
 """
aider/coders/wholefile_prompts.py

@@ -8,7 +8,7 @@ class WholeFilePrompts(CoderPrompts):
 Take requests for changes to the supplied code.
 If the request is ambiguous, ask questions.
 
-Always reply to the user in the same language they are using.
+Always reply to the user in {language}.
 
 {lazy_prompt}
 Once you understand the request you MUST:
aider/commands.py

@@ -139,7 +139,7 @@ class Commands:
         else:
             self.io.tool_output("Please provide a partial model name to search for.")
 
-    def cmd_web(self, args):
+    def cmd_web(self, args, return_content=False):
         "Scrape a webpage, convert to markdown and send in a message"
 
         url = args.strip()
@@ -158,11 +158,16 @@ class Commands:
         )
 
         content = self.scraper.scrape(url) or ""
-        content = f"{url}:\n\n" + content
+        content = f"Here is the content of {url}:\n\n" + content
+        if return_content:
+            return content
 
-        self.io.tool_output("... done.")
+        self.io.tool_output("... added to chat.")
 
-        return content
+        self.coder.cur_messages += [
+            dict(role="user", content=content),
+            dict(role="assistant", content="Ok."),
+        ]
 
     def is_command(self, inp):
         return inp[0] in "/!"
@@ -738,6 +743,10 @@ class Commands:
                 )
                 continue
 
+            if self.coder.repo and self.coder.repo.git_ignored_file(matched_file):
+                self.io.tool_error(f"Can't add {matched_file} which is in gitignore")
+                continue
+
             if abs_file_path in self.coder.abs_fnames:
                 self.io.tool_error(f"{matched_file} is already in the chat as an editable file")
                 continue
aider/exceptions.py (new file, 76 lines)

@@ -0,0 +1,76 @@
+from dataclasses import dataclass
+
+
+@dataclass
+class ExInfo:
+    name: str
+    retry: bool
+    description: str
+
+
+EXCEPTIONS = [
+    ExInfo("APIConnectionError", True, None),
+    ExInfo("APIError", True, None),
+    ExInfo("APIResponseValidationError", True, None),
+    ExInfo(
+        "AuthenticationError",
+        False,
+        "The API provider is not able to authenticate you. Check your API key.",
+    ),
+    ExInfo("AzureOpenAIError", True, None),
+    ExInfo("BadRequestError", False, None),
+    ExInfo("BudgetExceededError", True, None),
+    ExInfo(
+        "ContentPolicyViolationError",
+        True,
+        "The API provider has refused the request due to a safety policy about the content.",
+    ),
+    ExInfo("ContextWindowExceededError", False, None),  # special case handled in base_coder
+    ExInfo("InternalServerError", True, "The API provider's servers are down or overloaded."),
+    ExInfo("InvalidRequestError", True, None),
+    ExInfo("JSONSchemaValidationError", True, None),
+    ExInfo("NotFoundError", False, None),
+    ExInfo("OpenAIError", True, None),
+    ExInfo(
+        "RateLimitError",
+        True,
+        "The API provider has rate limited you. Try again later or check your quotas.",
+    ),
+    ExInfo("RouterRateLimitError", True, None),
+    ExInfo("ServiceUnavailableError", True, "The API provider's servers are down or overloaded."),
+    ExInfo("UnprocessableEntityError", True, None),
+    ExInfo("UnsupportedParamsError", True, None),
+]
+
+
+class LiteLLMExceptions:
+    exceptions = dict()
+
+    def __init__(self):
+        self._load()
+
+    def _load(self, strict=False):
+        import litellm
+
+        for var in dir(litellm):
+            if not var.endswith("Error"):
+                continue
+
+            ex_info = None
+            for exi in EXCEPTIONS:
+                if var == exi.name:
+                    ex_info = exi
+                    break
+
+            if strict and not ex_info:
+                raise ValueError(f"{var} is in litellm but not in aider's exceptions list")
+
+            ex = getattr(litellm, var)
+            self.exceptions[ex] = ex_info
+
+    def exceptions_tuple(self):
+        return tuple(self.exceptions)
+
+    def get_ex_info(self, ex):
+        """Return the ExInfo for a given exception instance"""
+        return self.exceptions.get(ex.__class__, ExInfo(None, None, None))
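A short usage sketch of this new registry, assuming litellm is installed; this mirrors how the retry loops in base_coder.py and sendchat.py consume it:

```python
# Sketch: catch any litellm exception via the collected tuple, then let the
# ExInfo table decide whether the error is retryable and what text to show.
from aider.exceptions import LiteLLMExceptions

litellm_ex = LiteLLMExceptions()

def call_model(send_fn):
    try:
        return send_fn()
    except litellm_ex.exceptions_tuple() as err:
        ex_info = litellm_ex.get_ex_info(err)
        if ex_info.description:
            print(ex_info.description)
        if ex_info.retry:
            print("transient error, safe to retry with backoff")
        else:
            print("permanent error, giving up")
```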
aider/io.py (65 changes)

@@ -1,5 +1,7 @@
 import base64
 import os
+import time
+import webbrowser
 from collections import defaultdict
 from dataclasses import dataclass
 from datetime import datetime
@@ -332,14 +334,36 @@ class InputOutput:
             self.tool_error("Use --encoding to set the unicode encoding.")
             return
 
-    def write_text(self, filename, content):
+    def write_text(self, filename, content, max_retries=5, initial_delay=0.1):
+        """
+        Writes content to a file, retrying with progressive backoff if the file is locked.
+
+        :param filename: Path to the file to write.
+        :param content: Content to write to the file.
+        :param max_retries: Maximum number of retries if a file lock is encountered.
+        :param initial_delay: Initial delay (in seconds) before the first retry.
+        """
         if self.dry_run:
             return
-        try:
-            with open(str(filename), "w", encoding=self.encoding) as f:
-                f.write(content)
-        except OSError as err:
-            self.tool_error(f"Unable to write file {filename}: {err}")
+
+        delay = initial_delay
+        for attempt in range(max_retries):
+            try:
+                with open(str(filename), "w", encoding=self.encoding) as f:
+                    f.write(content)
+                return  # Successfully wrote the file
+            except PermissionError as err:
+                if attempt < max_retries - 1:
+                    time.sleep(delay)
+                    delay *= 2  # Exponential backoff
+                else:
+                    self.tool_error(
+                        f"Unable to write file {filename} after {max_retries} attempts: {err}"
+                    )
+                    raise
+            except OSError as err:
+                self.tool_error(f"Unable to write file {filename}: {err}")
+                raise
 
     def rule(self):
         if self.pretty:
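A runnable sketch (hypothetical `flaky_write`, not aider code) of the behavior `write_text` gains here: an editor briefly holding a lock raises `PermissionError` a couple of times, and the doubling delays ride it out:

```python
import time

calls = {"n": 0}

def flaky_write():
    # Simulates an editor holding a file lock for the first two attempts.
    calls["n"] += 1
    if calls["n"] < 3:
        raise PermissionError("file is locked")
    print("written on attempt", calls["n"])

delay = 0.1
for attempt in range(5):
    try:
        flaky_write()
        break
    except PermissionError:
        if attempt == 4:
            raise  # persistent lock: give up, as write_text does
        time.sleep(delay)
        delay *= 2  # 0.1, 0.2, 0.4, ... seconds
```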
@@ -484,6 +508,15 @@ class InputOutput:
         hist = "\n" + content.strip() + "\n\n"
         self.append_chat_history(hist)
 
+    def offer_url(self, url, prompt="Open URL for more info?"):
+        """Offer to open a URL in the browser, returns True if opened."""
+        if url in self.never_prompts:
+            return False
+        if self.confirm_ask(prompt, subject=url, allow_never=True):
+            webbrowser.open(url)
+            return True
+        return False
+
     def confirm_ask(
         self,
         question,
@@ -722,13 +755,23 @@ class InputOutput:
         editable_files = [f for f in sorted(rel_fnames) if f not in rel_read_only_fnames]
 
         if read_only_files:
-            console.print("Read only files:", style="bold")
-            console.print(Columns(read_only_files))
+            files_with_label = ["Readonly:"] + read_only_files
+            read_only_output = StringIO()
+            Console(file=read_only_output, force_terminal=False).print(Columns(files_with_label))
+            read_only_lines = read_only_output.getvalue().splitlines()
+            console.print(Columns(files_with_label))
 
         if editable_files:
+            files_with_label = editable_files
             if read_only_files:
-                console.print()
-                console.print("Editable files:", style="bold")
-            console.print(Columns(editable_files))
+                files_with_label = ["Editable:"] + editable_files
+                editable_output = StringIO()
+                Console(file=editable_output, force_terminal=False).print(Columns(files_with_label))
+                editable_lines = editable_output.getvalue().splitlines()
+
+                if len(read_only_lines) > 1 or len(editable_lines) > 1:
+                    console.print()
+            console.print(Columns(files_with_label))
 
         return output.getvalue()
aider/main.py

@@ -5,7 +5,6 @@ import re
 import sys
 import threading
 import traceback
-import webbrowser
 from pathlib import Path
 
 import git
@@ -88,15 +87,25 @@ def make_new_repo(git_root, io):
 
 
 def setup_git(git_root, io):
+    try:
+        cwd = Path.cwd()
+    except OSError:
+        cwd = None
+
     repo = None
 
     if git_root:
-        repo = git.Repo(git_root)
-    elif Path.cwd() == Path.home():
+        try:
+            repo = git.Repo(git_root)
+        except ANY_GIT_ERROR:
+            pass
+    elif cwd == Path.home():
         io.tool_warning("You should probably run aider in a directory, not your home dir.")
         return
-    elif io.confirm_ask("No git repo found, create one to track aider's changes (recommended)?"):
-        git_root = str(Path.cwd().resolve())
+    elif cwd and io.confirm_ask(
+        "No git repo found, create one to track aider's changes (recommended)?"
+    ):
+        git_root = str(cwd.resolve())
         repo = make_new_repo(git_root, io)
 
     if not repo:
@@ -323,14 +332,16 @@ def load_dotenv_files(git_root, dotenv_fname, encoding="utf-8"):
 
 
 def register_litellm_models(git_root, model_metadata_fname, io, verbose=False):
-    model_metatdata_files = generate_search_path_list(
-        ".aider.model.metadata.json", git_root, model_metadata_fname
-    )
+    model_metatdata_files = []
+
+    # Add the resource file path
+    resource_metadata = importlib_resources.files("aider.resources").joinpath("model-metadata.json")
+    model_metatdata_files.append(str(resource_metadata))
+
+    model_metatdata_files += generate_search_path_list(
+        ".aider.model.metadata.json", git_root, model_metadata_fname
+    )
 
     try:
         model_metadata_files_loaded = models.register_litellm_models(model_metatdata_files)
         if len(model_metadata_files_loaded) > 0 and verbose:
@@ -367,8 +378,7 @@ def sanity_check_repo(repo, io):
         io.tool_error("Aider only works with git repos with version number 1 or 2.")
         io.tool_output("You may be able to convert your repo: git update-index --index-version=2")
         io.tool_output("Or run aider --no-git to proceed without using git.")
-        if io.confirm_ask("Open documentation url for more info?", subject=urls.git_index_version):
-            webbrowser.open(urls.git_index_version)
+        io.offer_url(urls.git_index_version, "Open documentation url for more info?")
         return False
 
     io.tool_error("Unable to read git repository, it may be corrupt?")
@@ -432,7 +442,6 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
     if args.analytics_disable:
         analytics = Analytics(permanently_disable=True)
         print("Analytics have been permanently disabled.")
-        return
 
     if not args.verify_ssl:
         import httpx
@@ -533,7 +542,14 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
 
     all_files = args.files + (args.file or [])
     fnames = [str(Path(fn).resolve()) for fn in all_files]
-    read_only_fnames = [str(Path(fn).resolve()) for fn in (args.read or [])]
+    read_only_fnames = []
+    for fn in args.read or []:
+        path = Path(fn).resolve()
+        if path.is_dir():
+            read_only_fnames.extend(str(f) for f in path.rglob("*") if f.is_file())
+        else:
+            read_only_fnames.append(str(path))
 
     if len(all_files) > 1:
         good = True
         for fname in all_files:
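The effect of the new loop as a standalone sketch: an invocation along the lines of `aider --read docs README.md` (hypothetical arguments) now expands the directory recursively, while plain files pass through unchanged:

```python
from pathlib import Path

def expand_read_args(read_args):
    # Directories are walked recursively; every regular file becomes read-only.
    read_only_fnames = []
    for fn in read_args:
        path = Path(fn).resolve()
        if path.is_dir():
            read_only_fnames.extend(str(f) for f in path.rglob("*") if f.is_file())
        else:
            read_only_fnames.append(str(path))
    return read_only_fnames

print(expand_read_args(["docs", "README.md"]))  # hypothetical paths
```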
@@ -642,10 +658,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
         io.tool_output("You can skip this check with --no-show-model-warnings")
 
         try:
-            if io.confirm_ask(
-                "Open documentation url for more info?", subject=urls.model_warnings
-            ):
-                webbrowser.open(urls.model_warnings)
+            io.offer_url(urls.model_warnings, "Open documentation url for more info?")
             io.tool_output()
         except KeyboardInterrupt:
             return 1
@@ -774,6 +787,10 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
         coder.apply_updates()
         return
 
+    if args.apply_clipboard_edits:
+        args.edit_format = main_model.editor_edit_format
+        args.message = "/paste"
+
     if "VSCODE_GIT_IPC_HANDLE" in os.environ:
         args.pretty = False
         io.tool_output("VSCode terminal detected, pretty output has been disabled.")
@@ -866,10 +883,7 @@ def check_and_load_imports(io, verbose=False):
     except Exception as err:
         io.tool_error(str(err))
         io.tool_output("Error loading required imports. Did you install aider properly?")
-        if io.confirm_ask(
-            "Open documentation url for more info?", subject=urls.install_properly
-        ):
-            webbrowser.open(urls.install_properly)
+        io.offer_url(urls.install_properly, "Open documentation url for more info?")
 
         sys.exit(1)
aider/models.py (153 changes)

@@ -52,6 +52,7 @@ ANTHROPIC_MODELS = """
 claude-2
 claude-2.1
 claude-3-haiku-20240307
+claude-3-5-haiku-20241022
 claude-3-opus-20240229
 claude-3-sonnet-20240229
 claude-3-5-sonnet-20240620
@@ -233,24 +234,24 @@ MODEL_SETTINGS = [
     ModelSettings(
         "claude-3-opus-20240229",
         "diff",
-        weak_model_name="claude-3-haiku-20240307",
+        weak_model_name="claude-3-5-haiku-20241022",
         use_repo_map=True,
     ),
     ModelSettings(
         "openrouter/anthropic/claude-3-opus",
         "diff",
-        weak_model_name="openrouter/anthropic/claude-3-haiku",
+        weak_model_name="openrouter/anthropic/claude-3-5-haiku",
         use_repo_map=True,
     ),
     ModelSettings(
         "claude-3-sonnet-20240229",
         "whole",
-        weak_model_name="claude-3-haiku-20240307",
+        weak_model_name="claude-3-5-haiku-20241022",
     ),
     ModelSettings(
         "claude-3-5-sonnet-20240620",
         "diff",
-        weak_model_name="claude-3-haiku-20240307",
+        weak_model_name="claude-3-5-haiku-20241022",
         editor_model_name="claude-3-5-sonnet-20240620",
         editor_edit_format="editor-diff",
         use_repo_map=True,
@@ -267,7 +268,7 @@ MODEL_SETTINGS = [
     ModelSettings(
         "anthropic/claude-3-5-sonnet-20240620",
         "diff",
-        weak_model_name="anthropic/claude-3-haiku-20240307",
+        weak_model_name="anthropic/claude-3-5-haiku-20241022",
         editor_model_name="anthropic/claude-3-5-sonnet-20240620",
         editor_edit_format="editor-diff",
         use_repo_map=True,
@@ -284,7 +285,7 @@ MODEL_SETTINGS = [
     ModelSettings(
         "anthropic/claude-3-5-sonnet-20241022",
         "diff",
-        weak_model_name="anthropic/claude-3-haiku-20240307",
+        weak_model_name="anthropic/claude-3-5-haiku-20241022",
         editor_model_name="anthropic/claude-3-5-sonnet-20241022",
         editor_edit_format="editor-diff",
         use_repo_map=True,
@@ -298,10 +299,27 @@ MODEL_SETTINGS = [
         cache_control=True,
         reminder="user",
     ),
+    ModelSettings(
+        "bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
+        "diff",
+        weak_model_name="bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
+        editor_model_name="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
+        editor_edit_format="editor-diff",
+        use_repo_map=True,
+        examples_as_sys_msg=True,
+        extra_params={
+            "extra_headers": {
+                "anthropic-beta": ANTHROPIC_BETA_HEADER,
+            },
+            "max_tokens": 8192,
+        },
+        cache_control=True,
+        reminder="user",
+    ),
     ModelSettings(
         "anthropic/claude-3-5-sonnet-latest",
         "diff",
-        weak_model_name="anthropic/claude-3-haiku-20240307",
+        weak_model_name="anthropic/claude-3-5-haiku-20241022",
         editor_model_name="anthropic/claude-3-5-sonnet-20241022",
         editor_edit_format="editor-diff",
         use_repo_map=True,
@@ -318,7 +336,7 @@ MODEL_SETTINGS = [
     ModelSettings(
         "claude-3-5-sonnet-20241022",
         "diff",
-        weak_model_name="claude-3-haiku-20240307",
+        weak_model_name="claude-3-5-haiku-20241022",
         editor_model_name="claude-3-5-sonnet-20241022",
         editor_edit_format="editor-diff",
         use_repo_map=True,
@@ -344,6 +362,52 @@ MODEL_SETTINGS = [
         },
         cache_control=True,
     ),
+    ModelSettings(
+        "anthropic/claude-3-5-haiku-20241022",
+        "diff",
+        weak_model_name="anthropic/claude-3-5-haiku-20241022",
+        use_repo_map=True,
+        extra_params={
+            "extra_headers": {
+                "anthropic-beta": ANTHROPIC_BETA_HEADER,
+            },
+        },
+        cache_control=True,
+    ),
+    ModelSettings(
+        "bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
+        "diff",
+        weak_model_name="bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
+        use_repo_map=True,
+        extra_params={
+            "extra_headers": {
+                "anthropic-beta": ANTHROPIC_BETA_HEADER,
+            },
+        },
+        cache_control=True,
+    ),
+    ModelSettings(
+        "claude-3-5-haiku-20241022",
+        "diff",
+        weak_model_name="claude-3-5-haiku-20241022",
+        use_repo_map=True,
+        examples_as_sys_msg=True,
+        extra_params={
+            "extra_headers": {
+                "anthropic-beta": ANTHROPIC_BETA_HEADER,
+            },
+        },
+        cache_control=True,
+    ),
+    ModelSettings(
+        "vertex_ai/claude-3-5-haiku@20241022",
+        "diff",
+        weak_model_name="vertex_ai/claude-3-5-haiku@20241022",
+        use_repo_map=True,
+        extra_params={
+            "max_tokens": 4096,
+        },
+    ),
     ModelSettings(
         "claude-3-haiku-20240307",
         "whole",
@@ -359,7 +423,7 @@ MODEL_SETTINGS = [
     ModelSettings(
         "openrouter/anthropic/claude-3.5-sonnet",
         "diff",
-        weak_model_name="openrouter/anthropic/claude-3-haiku",
+        weak_model_name="openrouter/anthropic/claude-3-5-haiku",
         editor_model_name="openrouter/anthropic/claude-3.5-sonnet",
         editor_edit_format="editor-diff",
         use_repo_map=True,
@@ -373,7 +437,7 @@ MODEL_SETTINGS = [
     ModelSettings(
         "openrouter/anthropic/claude-3.5-sonnet:beta",
         "diff",
-        weak_model_name="openrouter/anthropic/claude-3-haiku:beta",
+        weak_model_name="openrouter/anthropic/claude-3-5-haiku:beta",
         editor_model_name="openrouter/anthropic/claude-3.5-sonnet:beta",
         editor_edit_format="editor-diff",
         use_repo_map=True,
@@ -389,7 +453,7 @@ MODEL_SETTINGS = [
     ModelSettings(
         "vertex_ai/claude-3-5-sonnet@20240620",
         "diff",
-        weak_model_name="vertex_ai/claude-3-haiku@20240307",
+        weak_model_name="vertex_ai/claude-3-5-haiku@20241022",
         editor_model_name="vertex_ai/claude-3-5-sonnet@20240620",
         editor_edit_format="editor-diff",
         use_repo_map=True,
@@ -402,7 +466,7 @@ MODEL_SETTINGS = [
     ModelSettings(
         "vertex_ai/claude-3-5-sonnet-v2@20241022",
         "diff",
-        weak_model_name="vertex_ai/claude-3-haiku@20240307",
+        weak_model_name="vertex_ai/claude-3-5-haiku@20241022",
         editor_model_name="vertex_ai/claude-3-5-sonnet-v2@20241022",
         editor_edit_format="editor-diff",
         use_repo_map=True,
@@ -415,13 +479,13 @@ MODEL_SETTINGS = [
     ModelSettings(
         "vertex_ai/claude-3-opus@20240229",
         "diff",
-        weak_model_name="vertex_ai/claude-3-haiku@20240307",
+        weak_model_name="vertex_ai/claude-3-5-haiku@20241022",
         use_repo_map=True,
     ),
     ModelSettings(
         "vertex_ai/claude-3-sonnet@20240229",
         "whole",
-        weak_model_name="vertex_ai/claude-3-haiku@20240307",
+        weak_model_name="vertex_ai/claude-3-5-haiku@20241022",
     ),
     # Cohere
     ModelSettings(
@@ -486,6 +550,11 @@ MODEL_SETTINGS = [
         "diff-fenced",
         use_repo_map=True,
     ),
+    ModelSettings(
+        "vertex_ai/gemini-pro-experimental",
+        "diff-fenced",
+        use_repo_map=True,
+    ),
     ModelSettings(
         "gemini/gemini-1.5-flash-exp-0827",
         "whole",
@@ -646,6 +715,14 @@ MODEL_SETTINGS = [
         use_temperature=False,
         streaming=False,
     ),
+    ModelSettings(
+        "openrouter/qwen/qwen-2.5-coder-32b-instruct",
+        "diff",
+        weak_model_name="openrouter/qwen/qwen-2.5-coder-32b-instruct",
+        editor_model_name="openrouter/qwen/qwen-2.5-coder-32b-instruct",
+        editor_edit_format="editor-diff",
+        use_repo_map=True,
+    ),
 ]
@@ -706,16 +783,20 @@ class ModelInfoManager:
         return dict()
 
     def get_model_info(self, model):
-        if not litellm._lazy_module:
-            info = self.get_model_from_cached_json_db(model)
-            if info:
-                return info
+        cached_info = self.get_model_from_cached_json_db(model)
 
-        # If all else fails, do it the slow way...
-        try:
-            return litellm.get_model_info(model)
-        except Exception:
-            return dict()
+        litellm_info = None
+        if litellm._lazy_module or not cached_info:
+            try:
+                litellm_info = litellm.get_model_info(model)
+            except Exception as ex:
+                if "model_prices_and_context_window.json" not in str(ex):
+                    print(str(ex))
+
+        if litellm_info:
+            return litellm_info
+
+        return cached_info
 
 
 model_info_manager = ModelInfoManager()
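Distilled, the new lookup order is: always read the cached JSON database; only consult litellm when it is already loaded or the cache came up empty; prefer a litellm answer when one arrives, else fall back to the cache. A sketch with the collaborators stubbed out (all names here are hypothetical stand-ins, not aider's API):

```python
def get_model_info(model, cached_lookup, litellm_loaded, litellm_lookup):
    cached_info = cached_lookup(model)

    litellm_info = None
    if litellm_loaded or not cached_info:
        try:
            litellm_info = litellm_lookup(model)
        except Exception:
            litellm_info = None  # fall through to the cached answer

    return litellm_info if litellm_info else cached_info
```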
@@ -799,6 +880,17 @@ class Model(ModelSettings):
             self.use_temperature = False
             self.streaming = False
 
+        if (
+            "qwen" in model
+            and "coder" in model
+            and ("2.5" in model or "2-5" in model)
+            and "32b" in model
+        ):
+            "openrouter/qwen/qwen-2.5-coder-32b-instruct",
+            self.edit_format = "diff"
+            self.editor_edit_format = "editor-diff"
+            self.use_repo_map = True
+
         # use the defaults
         if self.edit_format == "diff":
             self.use_repo_map = True
@@ -985,8 +1077,14 @@ def register_litellm_models(model_fnames):
             continue
 
         try:
-            with open(model_fname, "r") as model_def_file:
-                model_def = json5.load(model_def_file)
+            data = Path(model_fname).read_text()
+            if not data.strip():
+                continue
+            model_def = json5.loads(data)
+            if not model_def:
+                continue
+
+            # only load litellm if we have actual data
+            litellm._load_litellm()
             litellm.register_model(model_def)
         except Exception as e:
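One reason the loader reads text and parses with json5: the bundled model-metadata.json (shown later in this diff) ends entries with trailing commas, which the stdlib `json` module rejects but `json5` accepts. A small sketch:

```python
import json5

data = """
{
    "openrouter/qwen/qwen-2.5-coder-32b-instruct": {
        "max_tokens": 33792,
        "mode": "chat",
    },
}
"""

model_def = json5.loads(data)  # json.loads() would raise on the trailing commas
print(model_def["openrouter/qwen/qwen-2.5-coder-32b-instruct"]["mode"])
```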
@@ -1069,7 +1167,10 @@ def fuzzy_match_models(name):
         model = model.lower()
         if attrs.get("mode") != "chat":
             continue
-        provider = (attrs["litellm_provider"] + "/").lower()
+        provider = attrs.get("litellm_provider", "").lower()
+        if not provider:
+            continue
+        provider += "/"
 
         if model.startswith(provider):
             fq_model = model
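The bug this fixes is easy to reproduce: a model entry with no provider info used to raise `KeyError` from `attrs["litellm_provider"]`. The `.get()` plus an explicit skip handles it:

```python
attrs = {"mode": "chat"}  # hypothetical entry missing the litellm_provider key

provider = attrs.get("litellm_provider", "").lower()
if not provider:
    print("skip: no provider info for this model entry")
else:
    provider += "/"
```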
aider/repo.py

@@ -169,7 +169,7 @@ class GitRepo:
     def get_rel_repo_dir(self):
         try:
             return os.path.relpath(self.repo.git_dir, os.getcwd())
-        except ValueError:
+        except (ValueError, OSError):
             return self.repo.git_dir
 
     def get_commit_message(self, diffs, context):
@@ -331,6 +331,15 @@ class GitRepo:
             lines,
         )
 
+    def git_ignored_file(self, path):
+        if not self.repo:
+            return
+        try:
+            if self.repo.ignored(path):
+                return True
+        except ANY_GIT_ERROR:
+            return False
+
     def ignored_file(self, fname):
         self.refresh_aider_ignore()
aider/repomap.py

@@ -28,7 +28,7 @@ from tree_sitter_languages import get_language, get_parser  # noqa: E402
 Tag = namedtuple("Tag", "rel_fname fname line name kind".split())
 
 
-SQLITE_ERRORS = (sqlite3.OperationalError, sqlite3.DatabaseError)
+SQLITE_ERRORS = (sqlite3.OperationalError, sqlite3.DatabaseError, OSError)
 
 
 class RepoMap:
@@ -197,7 +197,7 @@ class RepoMap:
                 self.TAGS_CACHE = new_cache
                 return
 
-        except (SQLITE_ERRORS, OSError) as e:
+        except SQLITE_ERRORS as e:
             # If anything goes wrong, warn and fall back to dict
             self.io.tool_warning(
                 f"Unable to use tags cache at {path}, falling back to memory cache"
@@ -368,6 +368,8 @@ class RepoMap:
         showing_bar = False
 
         for fname in fnames:
+            if self.verbose:
+                self.io.tool_output(f"Processing {fname}")
             if progress and not showing_bar:
                 progress()
aider/resources/model-metadata.json (new file, 11 lines)

@@ -0,0 +1,11 @@
+{
+    "openrouter/qwen/qwen-2.5-coder-32b-instruct": {
+        "max_tokens": 33792,
+        "max_input_tokens": 33792,
+        "max_output_tokens": 33792,
+        "input_cost_per_token": 0.00000018,
+        "output_cost_per_token": 0.00000018,
+        "litellm_provider": "openrouter",
+        "mode": "chat",
+    },
+}
aider/sendchat.py

@@ -2,9 +2,8 @@ import hashlib
 import json
 import time
 
-import backoff
-
 from aider.dump import dump  # noqa: F401
+from aider.exceptions import LiteLLMExceptions
 from aider.llm import litellm
 
 # from diskcache import Cache
@@ -17,52 +16,6 @@ CACHE = None
 RETRY_TIMEOUT = 60
 
 
-def retry_exceptions():
-    import httpx
-    import openai
-
-    return (
-        # httpx
-        httpx.ConnectError,
-        httpx.RemoteProtocolError,
-        httpx.ReadTimeout,
-        #
-        # litellm exceptions inherit from openai exceptions
-        # https://docs.litellm.ai/docs/exception_mapping
-        #
-        # openai.BadRequestError,
-        # litellm.ContextWindowExceededError,
-        # litellm.ContentPolicyViolationError,
-        #
-        # openai.AuthenticationError,
-        # openai.PermissionDeniedError,
-        # openai.NotFoundError,
-        #
-        openai.APITimeoutError,
-        openai.UnprocessableEntityError,
-        openai.RateLimitError,
-        openai.APIConnectionError,
-        # openai.APIError,
-        # openai.APIStatusError,
-        openai.InternalServerError,
-    )
-
-
-def lazy_litellm_retry_decorator(func):
-    def wrapper(*args, **kwargs):
-        decorated_func = backoff.on_exception(
-            backoff.expo,
-            retry_exceptions(),
-            max_time=RETRY_TIMEOUT,
-            on_backoff=lambda details: print(
-                f"{details.get('exception', 'Exception')}\nRetry in {details['wait']:.1f} seconds."
-            ),
-        )(func)
-        return decorated_func(*args, **kwargs)
-
-    return wrapper
-
-
 def send_completion(
     model_name,
     messages,
@@ -104,6 +57,8 @@ def send_completion(
 
 
 def simple_send_with_retries(model_name, messages, extra_params=None):
+    litellm_ex = LiteLLMExceptions()
+
     retry_delay = 0.125
     while True:
         try:
@@ -116,14 +71,27 @@ def simple_send_with_retries(model_name, messages, extra_params=None):
             }
 
             _hash, response = send_completion(**kwargs)
+            if not response or not hasattr(response, "choices") or not response.choices:
+                return None
             return response.choices[0].message.content
-        except retry_exceptions() as err:
+        except litellm_ex.exceptions_tuple() as err:
+            ex_info = litellm_ex.get_ex_info(err)
+
             print(str(err))
-            retry_delay *= 2
-            if retry_delay > RETRY_TIMEOUT:
-                break
+            if ex_info.description:
+                print(ex_info.description)
+
+            should_retry = ex_info.retry
+            if should_retry:
+                retry_delay *= 2
+                if retry_delay > RETRY_TIMEOUT:
+                    should_retry = False
+
+            if not should_retry:
+                return None
+
             print(f"Retrying in {retry_delay:.1f} seconds...")
             time.sleep(retry_delay)
             continue
         except AttributeError:
-            return
+            return None
aider/website/HISTORY.md

@@ -10,6 +10,10 @@ description: Release notes and stats on aider writing its own code.
 
 {% include blame.md %}
 
+The above
+[stats are based on the git commit history](/docs/faq.html#how-are-the-aider-wrote-xx-of-code-stats-computed)
+in the aider repo.
+
 <!--[[[cog
 # This page is a copy of HISTORY.md, adding the front matter above.
 text = open("HISTORY.md").read()
@@ -21,6 +25,46 @@ cog.out(text)
 
 ### main branch
 
+- Fixed bug in fuzzy model name matching when litellm provider info is missing.
+- Modified model metadata file loading to allow override of resource file.
+- Allow recursive loading of dirs using `--read`.
+- Updated dependency versions to pick up litellm fix for ollama models.
+- Added exponential backoff retry when writing files to handle editor file locks.
+- Updated Qwen 2.5 Coder 32B model configuration.
+
+### Aider v0.63.1
+
+- Fixed bug in git ignored file handling.
+- Improved error handling for git operations.
+
+### Aider v0.63.0
+
+- Support for Qwen 2.5 Coder 32B.
+- `/web` command just adds the page to the chat, without triggering an LLM response.
+- Improved prompting for the user's preferred chat language.
+- Improved handling of LiteLLM exceptions.
+- Bugfix for double-counting tokens when reporting cache stats.
+- Bugfix for the LLM creating new files.
+- Other small bug fixes.
+- Aider wrote 55% of the code in this release.
+
+### Aider v0.62.0
+
+- Full support for Claude 3.5 Haiku
+  - Scored 75% on [aider's code editing leaderboard](https://aider.chat/docs/leaderboards/).
+  - Almost as good as Sonnet at much lower cost.
+  - Launch with `--haiku` to use it.
+- Easily apply file edits from ChatGPT, Claude or other web apps
+  - Chat with ChatGPT or Claude via their web app.
+  - Give it your source files and ask for the changes you want.
+  - Use the web app's "copy response" button to copy the entire reply from the LLM.
+  - Run `aider --apply-clipboard-edits file-to-edit.js`.
+  - Aider will edit your file with the LLM's changes.
+- Bugfix for creating new files.
+- Aider wrote 84% of the code in this release.
+
 ### Aider v0.61.0
 
 - Load and save aider slash-commands to files:
   - `/save <fname>` command will make a file of `/add` and `/read-only` commands that recreate the current file context in the chat.
   - `/load <fname>` will replay the commands in the file.
aider/website/_data/blame.yml

@@ -2819,3 +2819,163 @@
     fry69: 18
   start_tag: v0.59.0
   total_lines: 285
+- aider_percentage: 67.61
+  aider_total: 860
+  end_date: '2024-11-01'
+  end_tag: v0.61.0
+  file_counts:
+    aider/__init__.py:
+      Paul Gauthier: 1
+    aider/analytics.py:
+      Paul Gauthier: 75
+      Paul Gauthier (aider): 89
+    aider/args.py:
+      Paul Gauthier: 5
+      Paul Gauthier (aider): 29
+    aider/coders/base_coder.py:
+      Paul Gauthier: 56
+      Paul Gauthier (aider): 43
+    aider/coders/editblock_coder.py:
+      Paul Gauthier: 14
+    aider/commands.py:
+      Paul Gauthier: 14
+      Paul Gauthier (aider): 86
+    aider/io.py:
+      Paul Gauthier: 12
+      Paul Gauthier (aider): 32
+    aider/linter.py:
+      Paul Gauthier: 6
+    aider/main.py:
+      Paul Gauthier: 48
+      Paul Gauthier (aider): 10
+    aider/models.py:
+      Paul Gauthier: 54
+      Paul Gauthier (aider): 63
+      kAIto47802: 4
+    aider/repomap.py:
+      Paul Gauthier: 12
+      Paul Gauthier (aider): 52
+    aider/sendchat.py:
+      Paul Gauthier: 23
+      Paul Gauthier (aider): 23
+    aider/urls.py:
+      Paul Gauthier: 2
+    aider/utils.py:
+      Paul Gauthier (aider): 6
+    scripts/issues.py:
+      Paul Gauthier (aider): 13
+    scripts/pip-compile.sh:
+      Paul Gauthier (aider): 13
+    scripts/update-docs.sh:
+      Paul Gauthier: 1
+      Paul Gauthier (aider): 5
+    tests/basic/test_analytics.py:
+      Paul Gauthier: 1
+      Paul Gauthier (aider): 99
+    tests/basic/test_commands.py:
+      Konstantin L: 34
+      Paul Gauthier: 45
+      Paul Gauthier (aider): 267
+    tests/basic/test_io.py:
+      Paul Gauthier: 2
+      Paul Gauthier (aider): 4
+    tests/basic/test_main.py:
+      Paul Gauthier (aider): 3
+    tests/basic/test_models.py:
+      Paul Gauthier: 3
+      Paul Gauthier (aider): 9
+    tests/basic/test_sanity_check_repo.py:
+      Paul Gauthier (aider): 6
+    tests/basic/test_sendchat.py:
+      Paul Gauthier (aider): 8
+  grand_total:
+    Konstantin L: 34
+    Paul Gauthier: 374
+    Paul Gauthier (aider): 860
+    kAIto47802: 4
+  start_tag: v0.60.0
+  total_lines: 1272
+- aider_percentage: 84.0
+  aider_total: 63
+  end_date: '2024-11-04'
+  end_tag: v0.62.0
+  file_counts:
+    aider/__init__.py:
+      Paul Gauthier: 1
+    aider/args.py:
+      Paul Gauthier (aider): 14
+    aider/coders/editblock_coder.py:
+      Paul Gauthier: 6
+    aider/main.py:
+      Paul Gauthier (aider): 4
+    aider/models.py:
+      Paul Gauthier: 5
+      Paul Gauthier (aider): 45
+  grand_total:
+    Paul Gauthier: 12
+    Paul Gauthier (aider): 63
+  start_tag: v0.61.0
+  total_lines: 75
+- aider_percentage: 55.16
+  aider_total: 385
+  end_date: '2024-11-13'
+  end_tag: v0.63.0
+  file_counts:
+    aider/__init__.py:
+      Paul Gauthier: 1
+    aider/coders/architect_coder.py:
+      Paul Gauthier: 3
+    aider/coders/base_coder.py:
+      Paul Gauthier: 42
+      Paul Gauthier (aider): 1
+    aider/coders/editblock_coder.py:
+      Paul Gauthier: 4
+    aider/commands.py:
+      Paul Gauthier: 13
+    aider/exceptions.py:
+      Paul Gauthier: 72
+      Paul Gauthier (aider): 4
+    aider/io.py:
+      Paul Gauthier: 3
+      Paul Gauthier (aider): 23
+    aider/main.py:
+      Paul Gauthier: 9
+      Paul Gauthier (aider): 9
+    aider/models.py:
+      Logan Attwood: 29
+      Paul Gauthier: 50
+      Paul Gauthier (aider): 7
+    aider/repo.py:
+      Paul Gauthier: 7
+    aider/repomap.py:
+      Paul Gauthier: 4
+    aider/sendchat.py:
+      Paul Gauthier: 17
+      Paul Gauthier (aider): 4
+    scripts/issues.py:
+      Paul Gauthier: 4
+      Paul Gauthier (aider): 195
+    tests/basic/test_coder.py:
+      Paul Gauthier: 2
+    tests/basic/test_commands.py:
+      Paul Gauthier (aider): 20
+    tests/basic/test_editblock.py:
+      Paul Gauthier: 41
+    tests/basic/test_exceptions.py:
+      Paul Gauthier (aider): 65
+    tests/basic/test_main.py:
+      Paul Gauthier: 1
+    tests/basic/test_sanity_check_repo.py:
+      Paul Gauthier: 2
+      Paul Gauthier (aider): 2
+    tests/basic/test_sendchat.py:
+      Paul Gauthier: 8
+      Paul Gauthier (aider): 55
+    tests/scrape/test_scrape.py:
+      Paul Gauthier: 1
+  grand_total:
+    Logan Attwood: 29
+    Paul Gauthier: 284
+    Paul Gauthier (aider): 385
+  start_tag: v0.62.0
+  total_lines: 698
@@ -20,7 +20,7 @@
|
||||
versions: 0.30.2-dev
|
||||
seconds_per_case: 32.4
|
||||
total_cost: 13.8395
|
||||
|
||||
|
||||
- dirname: 2024-03-06-16-42-00--claude3-sonnet-whole
|
||||
test_cases: 133
|
||||
model: claude-3-sonnet-20240229
|
||||
@@ -43,7 +43,7 @@
|
||||
versions: 0.25.1-dev
|
||||
seconds_per_case: 23.1
|
||||
total_cost: 0.0000
|
||||
|
||||
|
||||
- dirname: 2024-05-03-20-47-24--gemini-1.5-pro-diff-fenced
|
||||
test_cases: 133
|
||||
model: gemini-1.5-pro-latest
|
||||
@@ -88,7 +88,7 @@
|
||||
versions: 0.33.1-dev
|
||||
seconds_per_case: 6.5
|
||||
total_cost: 0.5032
|
||||
|
||||
|
||||
- dirname: 2023-11-06-21-23-59--gpt-3.5-turbo-0301
|
||||
test_cases: 133
|
||||
model: gpt-3.5-turbo-0301
|
||||
@@ -111,7 +111,7 @@
|
||||
versions: 0.16.4-dev
|
||||
seconds_per_case: 6.5
|
||||
total_cost: 0.4822
|
||||
|
||||
|
||||
- dirname: 2023-11-07-02-41-07--gpt-3.5-turbo-0613
|
||||
test_cases: 133
|
||||
model: gpt-3.5-turbo-0613
|
||||
@@ -155,7 +155,7 @@
|
||||
versions: 0.30.2-dev
|
||||
seconds_per_case: 5.3
|
||||
total_cost: 0.3261
|
||||
|
||||
|
||||
- dirname: 2024-01-25-23-37-15--jan-exercism-gpt-4-0125-preview-udiff
|
||||
test_cases: 133
|
||||
model: gpt-4-0125-preview
|
||||
@@ -178,7 +178,7 @@
|
||||
versions: 0.22.1-dev
|
||||
seconds_per_case: 44.8
|
||||
total_cost: 14.6428
|
||||
|
||||
|
||||
- dirname: 2024-05-04-15-07-30--redo-gpt-4-0314-diff-reminder-rules
|
||||
test_cases: 133
|
||||
model: gpt-4-0314
|
||||
@@ -201,7 +201,7 @@
|
||||
versions: 0.31.2-dev
|
||||
seconds_per_case: 19.8
|
||||
total_cost: 16.2689
|
||||
|
||||
|
||||
- dirname: 2023-12-16-21-24-28--editblock-gpt-4-0613-actual-main
|
||||
test_cases: 133
|
||||
model: gpt-4-0613
|
||||
@@ -228,7 +228,7 @@
|
||||
- dirname: 2024-05-08-21-16-03--may-gpt-4-1106-preview-udiff
|
||||
test_cases: 133
|
||||
model: gpt-4-1106-preview
|
||||
released: 2023-11-06
|
||||
edit_format: udiff
|
||||
commit_hash: 87664dc
|
||||
pass_rate_1: 51.9
|
||||
@@ -247,7 +247,7 @@
|
||||
versions: 0.33.1-dev
|
||||
seconds_per_case: 20.4
|
||||
total_cost: 6.6061
|
||||
|
||||
|
||||
- dirname: 2024-05-01-02-09-20--gpt-4-turbo-examples
|
||||
test_cases: 133
|
||||
model: gpt-4-turbo-2024-04-09 (udiff)
|
||||
@@ -270,7 +270,7 @@
|
||||
versions: 0.30.2-dev
|
||||
seconds_per_case: 22.8
|
||||
total_cost: 6.3337
|
||||
|
||||
|
||||
- dirname: 2024-05-03-22-24-48--openrouter--llama3-diff-examples-sys-msg
|
||||
test_cases: 132
|
||||
model: llama3-70b-8192
|
||||
@@ -293,7 +293,7 @@
|
||||
versions: 0.31.2-dev
|
||||
seconds_per_case: 14.5
|
||||
total_cost: 0.4311
|
||||
|
||||
|
||||
- dirname: 2024-05-06-18-31-08--command-r-plus-whole-final
|
||||
test_cases: 133
|
||||
model: command-r-plus
|
||||
@@ -316,11 +316,11 @@
|
||||
versions: 0.31.2-dev
|
||||
seconds_per_case: 22.9
|
||||
total_cost: 2.7494
|
||||
|
||||
|
||||
- dirname: 2024-05-07-20-32-37--qwen1.5-110b-chat-whole
|
||||
test_cases: 133
|
||||
model: qwen1.5-110b-chat
|
||||
released: 2024-02-04
|
||||
edit_format: whole
|
||||
commit_hash: 70b1c0c
|
||||
pass_rate_1: 30.8
|
||||
@@ -339,7 +339,7 @@
|
||||
versions: 0.31.2-dev
|
||||
seconds_per_case: 46.9
|
||||
total_cost: 0.0000
|
||||
|
||||
|
||||
- dirname: 2024-05-07-20-57-04--wizardlm-2-8x22b-whole
|
||||
test_cases: 133
|
||||
model: WizardLM-2 8x22B
|
||||
@@ -384,7 +384,7 @@
|
||||
versions: 0.34.1-dev
|
||||
seconds_per_case: 6.0
|
||||
total_cost: 0.0000
|
||||
|
||||
|
||||
- dirname: 2024-04-12-22-18-20--gpt-4-turbo-2024-04-09-plain-diff
|
||||
test_cases: 33
|
||||
model: gpt-4-turbo-2024-04-09 (diff)
|
||||
@@ -568,7 +568,7 @@
|
||||
versions: 0.42.1-dev
|
||||
seconds_per_case: 17.6
|
||||
total_cost: 3.6346
|
||||
|
||||
|
||||
- dirname: 2024-07-01-21-41-48--haiku-whole
|
||||
test_cases: 133
|
||||
model: claude-3-haiku-20240307
|
||||
@@ -1131,7 +1131,7 @@
|
||||
versions: 0.56.1.dev
|
||||
seconds_per_case: 80.9
|
||||
total_cost: 63.9190
|
||||
|
||||
|
||||
- dirname: 2024-09-19-16-58-29--qwen2.5-coder:7b-instruct-q8_0
|
||||
test_cases: 133
|
||||
model: qwen2.5-coder:7b-instruct-q8_0
|
||||
@@ -1154,7 +1154,7 @@
|
||||
versions: 0.56.0
|
||||
seconds_per_case: 9.3
|
||||
total_cost: 0.0000
|
||||
|
||||
|
||||
- dirname: 2024-09-20-20-20-19--qwen-2.5-72b-instruct-diff
|
||||
test_cases: 133
|
||||
model: qwen-2.5-72b-instruct (bf16)
|
||||
@@ -1458,7 +1458,7 @@
|
||||
versions: 0.58.1.dev
|
||||
seconds_per_case: 63.7
|
||||
total_cost: 0.0000
|
||||
|
||||
|
||||
- dirname: 2024-10-01-16-50-09--hermes3-whole-4
|
||||
test_cases: 133
|
||||
model: ollama/hermes3
|
||||
@@ -1610,4 +1610,211 @@
|
||||
date: 2024-10-22
|
||||
versions: 0.59.2.dev
|
||||
seconds_per_case: 18.6
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2024-11-04-19-19-32--haiku35-diff-ex-as-sys-false
|
||||
test_cases: 133
|
||||
model: claude-3-5-haiku-20241022
|
||||
edit_format: diff
|
||||
commit_hash: 03bbdb0-dirty
|
||||
pass_rate_1: 61.7
|
||||
pass_rate_2: 75.2
|
||||
percent_cases_well_formed: 95.5
|
||||
error_outputs: 11
|
||||
num_malformed_responses: 11
|
||||
num_with_malformed_responses: 6
|
||||
user_asks: 1
|
||||
lazy_comments: 1
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 2
|
||||
command: aider --model anthropic/claude-3-5-haiku-20241022
|
||||
date: 2024-11-04
|
||||
versions: 0.61.1.dev
|
||||
seconds_per_case: 18.4
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2024-11-07-06-15-36--Qwen2.5.1-Coder-7B-Instruct-GGUF:Q8_0-32k-whole
|
||||
test_cases: 133
|
||||
model: ollama/Qwen2.5.1-Coder-7B-Instruct-GGUF:Q8_0-32k
|
||||
edit_format: whole
|
||||
commit_hash: e76704e
|
||||
pass_rate_1: 52.6
|
||||
pass_rate_2: 63.9
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 0
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 4
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 1
|
||||
command: aider --model ollama/Qwen2.5.1-Coder-7B-Instruct-GGUF:Q8_0-32k
|
||||
date: 2024-11-07
|
||||
versions: 0.59.2.dev
|
||||
seconds_per_case: 18.2
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2024-10-29-00-29-09--Qwen2.5-Coder-0.5B-Instruct
|
||||
test_cases: 133
|
||||
model: Qwen2.5-Coder-0.5B-Instruct
|
||||
edit_format: whole
|
||||
commit_hash: 58bd375
|
||||
pass_rate_1: 14.3
|
||||
pass_rate_2: 14.3
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 20
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 45
|
||||
lazy_comments: 0
|
||||
syntax_errors: 2
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 20
|
||||
test_timeouts: 2
|
||||
command: aider --model openai/Qwen2.5-Coder-0.5B-Instruct
|
||||
date: 2024-10-29
|
||||
versions: 0.59.2.dev
|
||||
seconds_per_case: 16.0
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2024-11-11-19-37-01--Qwen2.5-Coder-1.5B-Instruct
|
||||
test_cases: 133
|
||||
model: Qwen2.5-Coder-1.5B-Instruct
|
||||
edit_format: whole
|
||||
commit_hash: bb5681c
|
||||
pass_rate_1: 28.6
|
||||
pass_rate_2: 31.6
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 5
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 13
|
||||
lazy_comments: 2
|
||||
syntax_errors: 1
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 5
|
||||
test_timeouts: 2
|
||||
command: aider --model openai/Qwen2.5-Coder-1.5B-Instruct
|
||||
date: 2024-11-11
|
||||
versions: 0.59.2.dev
|
||||
seconds_per_case: 27.4
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2024-11-04-02-25-32--Qwen2.5-Coder-3B-Instruct
|
||||
test_cases: 133
|
||||
model: Qwen2.5-Coder-3B-Instruct
|
||||
edit_format: whole
|
||||
commit_hash: 0ba3647
|
||||
pass_rate_1: 33.8
|
||||
pass_rate_2: 39.1
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 4
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 3
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 4
|
||||
test_timeouts: 6
|
||||
command: aider --model openai/Qwen2.5-Coder-3B-Instruct
|
||||
date: 2024-11-04
|
||||
versions: 0.59.2.dev
|
||||
seconds_per_case: 18.7
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2024-10-16-16-20-59--Qwen2.5-Coder-7B-Instruct
|
||||
test_cases: 133
|
||||
model: Qwen2.5-Coder-7B-Instruct
|
||||
edit_format: whole
|
||||
commit_hash: 92fe979-dirty
|
||||
pass_rate_1: 51.9
|
||||
pass_rate_2: 57.9
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 2
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 2
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 2
|
||||
test_timeouts: 5
|
||||
command: aider --model openai/Qwen2.5-Coder-7B-Instruct
|
||||
date: 2024-10-16
|
||||
versions: 0.59.2.dev
|
||||
seconds_per_case: 10.5
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2024-10-29-11-53-39--Qwen2.5-Coder-14B-Instruct
|
||||
test_cases: 133
|
||||
model: Qwen2.5-Coder-14B-Instruct
|
||||
edit_format: whole
|
||||
commit_hash: 58bd375
|
||||
pass_rate_1: 58.6
|
||||
pass_rate_2: 69.2
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 3
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 2
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 3
|
||||
test_timeouts: 0
|
||||
command: aider --model openai/Qwen2.5-Coder-14B-Instruct
|
||||
date: 2024-10-29
|
||||
versions: 0.59.2.dev
|
||||
seconds_per_case: 18.3
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2024-11-09-10-57-11--Qwen2.5-Coder-32B-Instruct
|
||||
test_cases: 133
|
||||
model: Qwen2.5-Coder-32B-Instruct (whole)
|
||||
edit_format: whole
|
||||
commit_hash: ec9982a
|
||||
pass_rate_1: 60.9
|
||||
pass_rate_2: 73.7
|
||||
percent_cases_well_formed: 100.0
|
||||
error_outputs: 1
|
||||
num_malformed_responses: 0
|
||||
num_with_malformed_responses: 0
|
||||
user_asks: 1
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 1
|
||||
test_timeouts: 1
|
||||
command: aider --model openai/Qwen2.5-Coder-32B-Instruct
|
||||
date: 2024-11-09
|
||||
versions: 0.59.2.dev
|
||||
seconds_per_case: 26.6
|
||||
total_cost: 0.0000
|
||||
|
||||
- dirname: 2024-11-09-11-09-15--Qwen2.5-Coder-32B-Instruct
|
||||
test_cases: 133
|
||||
model: Qwen2.5-Coder-32B-Instruct (diff)
|
||||
edit_format: diff
|
||||
commit_hash: ec9982a
|
||||
pass_rate_1: 59.4
|
||||
pass_rate_2: 71.4
|
||||
percent_cases_well_formed: 94.7
|
||||
error_outputs: 17
|
||||
num_malformed_responses: 17
|
||||
num_with_malformed_responses: 7
|
||||
user_asks: 1
|
||||
lazy_comments: 0
|
||||
syntax_errors: 0
|
||||
indentation_errors: 0
|
||||
exhausted_context_windows: 0
|
||||
test_timeouts: 3
|
||||
command: aider --model openai/Qwen2.5-Coder-32B-Instruct
|
||||
date: 2024-11-09
|
||||
versions: 0.59.2.dev
|
||||
seconds_per_case: 22.5
|
||||
total_cost: 0.0000
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
<canvas id="linesChart" width="800" height="360" style="margin-top: 20px"></canvas>
|
||||
<canvas id="blameChart" width="800" height="360" style="margin-top: 20px"></canvas>
|
||||
<canvas id="linesChart" width="800" height="360" style="margin-top: 20px"></canvas>
|
||||
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/moment"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/chartjs-adapter-moment"></script>
|
||||
|
||||
@@ -12,6 +12,10 @@ nav_exclude: true
|
||||
|
||||
[](https://aider.chat/assets/self-assembly.jpg)
|
||||
|
||||
{: .note }
|
||||
This article is quite outdated. For current statistics, see
|
||||
[aider's release history](/HISTORY.html).
|
||||
|
||||
The
|
||||
[aider git repo](https://github.com/Aider-AI/aider)
|
||||
currently contains about 4K commits and 14K lines of code.
|
||||
|
||||
File diff suppressed because it is too large
@@ -32,6 +32,9 @@
|
||||
## Use claude-3-5-sonnet-20241022 model for the main chat
|
||||
#sonnet: false
|
||||
|
||||
## Use claude-3-5-haiku-20241022 model for the main chat
|
||||
#haiku: false
|
||||
|
||||
## Use gpt-4-0613 model for the main chat
|
||||
#4: false
|
||||
|
||||
@@ -320,6 +323,9 @@
|
||||
## Apply the changes from the given file instead of running the chat (debug)
|
||||
#apply: xxx
|
||||
|
||||
## Apply clipboard contents as edits using the main model's editor format
|
||||
#apply-clipboard-edits: false
|
||||
|
||||
## Always say yes to every confirmation
|
||||
#yes-always: false
|
||||
|
||||
|
||||
@@ -36,6 +36,9 @@
|
||||
## Use claude-3-5-sonnet-20241022 model for the main chat
|
||||
#AIDER_SONNET=
|
||||
|
||||
## Use claude-3-5-haiku-20241022 model for the main chat
|
||||
#AIDER_HAIKU=
|
||||
|
||||
## Use gpt-4-0613 model for the main chat
|
||||
#AIDER_4=
|
||||
|
||||
@@ -306,6 +309,9 @@
|
||||
## Apply the changes from the given file instead of running the chat (debug)
|
||||
#AIDER_APPLY=
|
||||
|
||||
## Apply clipboard contents as edits using the main model's editor format
|
||||
#AIDER_APPLY_CLIPBOARD_EDITS=false
|
||||
|
||||
## Always say yes to every confirmation
|
||||
#AIDER_YES_ALWAYS=
|
||||
|
||||
|
||||
@@ -400,7 +400,7 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: claude-3-haiku-20240307
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
- cache_control: false
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
@@ -416,7 +416,7 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: openrouter/anthropic/claude-3-haiku
|
||||
weak_model_name: openrouter/anthropic/claude-3-5-haiku
|
||||
- cache_control: false
|
||||
caches_by_default: false
|
||||
edit_format: whole
|
||||
@@ -432,7 +432,7 @@ cog.out("```\n")
|
||||
use_repo_map: false
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: claude-3-haiku-20240307
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
- cache_control: true
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
@@ -451,7 +451,7 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: claude-3-haiku-20240307
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
- cache_control: true
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
@@ -470,7 +470,7 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: anthropic/claude-3-haiku-20240307
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
- cache_control: true
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
@@ -489,7 +489,26 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: anthropic/claude-3-haiku-20240307
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
- cache_control: true
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
editor_edit_format: editor-diff
|
||||
editor_model_name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31
|
||||
max_tokens: 8192
|
||||
lazy: false
|
||||
name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0
|
||||
reminder: user
|
||||
send_undo_reply: false
|
||||
streaming: true
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
- cache_control: true
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
@@ -508,7 +527,7 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: anthropic/claude-3-haiku-20240307
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
- cache_control: true
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
@@ -527,7 +546,7 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: claude-3-haiku-20240307
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
- cache_control: true
|
||||
caches_by_default: false
|
||||
edit_format: whole
|
||||
@@ -546,6 +565,77 @@ cog.out("```\n")
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: anthropic/claude-3-haiku-20240307
|
||||
- cache_control: true
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
editor_edit_format: null
|
||||
editor_model_name: null
|
||||
examples_as_sys_msg: false
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31
|
||||
lazy: false
|
||||
name: anthropic/claude-3-5-haiku-20241022
|
||||
reminder: user
|
||||
send_undo_reply: false
|
||||
streaming: true
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
- cache_control: true
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
editor_edit_format: null
|
||||
editor_model_name: null
|
||||
examples_as_sys_msg: false
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31
|
||||
lazy: false
|
||||
name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
reminder: user
|
||||
send_undo_reply: false
|
||||
streaming: true
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
- cache_control: true
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
editor_edit_format: null
|
||||
editor_model_name: null
|
||||
examples_as_sys_msg: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31
|
||||
lazy: false
|
||||
name: claude-3-5-haiku-20241022
|
||||
reminder: user
|
||||
send_undo_reply: false
|
||||
streaming: true
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
- cache_control: false
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
editor_edit_format: null
|
||||
editor_model_name: null
|
||||
examples_as_sys_msg: false
|
||||
extra_params:
|
||||
max_tokens: 4096
|
||||
lazy: false
|
||||
name: vertex_ai/claude-3-5-haiku@20241022
|
||||
reminder: user
|
||||
send_undo_reply: false
|
||||
streaming: true
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
- cache_control: true
|
||||
caches_by_default: false
|
||||
edit_format: whole
|
||||
@@ -580,7 +670,7 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: openrouter/anthropic/claude-3-haiku
|
||||
weak_model_name: openrouter/anthropic/claude-3-5-haiku
|
||||
- cache_control: true
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
@@ -597,7 +687,7 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: openrouter/anthropic/claude-3-haiku:beta
|
||||
weak_model_name: openrouter/anthropic/claude-3-5-haiku:beta
|
||||
- cache_control: false
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
@@ -614,7 +704,7 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: vertex_ai/claude-3-haiku@20240307
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
- cache_control: false
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
@@ -631,7 +721,7 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: vertex_ai/claude-3-haiku@20240307
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
- cache_control: false
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
@@ -647,7 +737,7 @@ cog.out("```\n")
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: vertex_ai/claude-3-haiku@20240307
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
- cache_control: false
|
||||
caches_by_default: false
|
||||
edit_format: whole
|
||||
@@ -663,7 +753,7 @@ cog.out("```\n")
|
||||
use_repo_map: false
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: vertex_ai/claude-3-haiku@20240307
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
- cache_control: false
|
||||
caches_by_default: false
|
||||
edit_format: whole
|
||||
@@ -824,6 +914,22 @@ cog.out("```\n")
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: null
|
||||
- cache_control: false
|
||||
caches_by_default: false
|
||||
edit_format: diff-fenced
|
||||
editor_edit_format: null
|
||||
editor_model_name: null
|
||||
examples_as_sys_msg: false
|
||||
extra_params: null
|
||||
lazy: false
|
||||
name: vertex_ai/gemini-pro-experimental
|
||||
reminder: user
|
||||
send_undo_reply: false
|
||||
streaming: true
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: null
|
||||
- cache_control: false
|
||||
caches_by_default: false
|
||||
edit_format: whole
|
||||
@@ -1068,6 +1174,22 @@ cog.out("```\n")
|
||||
use_system_prompt: false
|
||||
use_temperature: false
|
||||
weak_model_name: openrouter/openai/gpt-4o-mini
|
||||
- cache_control: false
|
||||
caches_by_default: false
|
||||
edit_format: diff
|
||||
editor_edit_format: editor-diff
|
||||
editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
|
||||
examples_as_sys_msg: false
|
||||
extra_params: null
|
||||
lazy: false
|
||||
name: openrouter/qwen/qwen-2.5-coder-32b-instruct
|
||||
reminder: user
|
||||
send_undo_reply: false
|
||||
streaming: true
|
||||
use_repo_map: true
|
||||
use_system_prompt: true
|
||||
use_temperature: true
|
||||
weak_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
|
||||
```
|
||||
<!--[[[end]]]-->
|
||||
|
||||
|
||||
@@ -88,6 +88,9 @@ cog.outl("```")
|
||||
## Use claude-3-5-sonnet-20241022 model for the main chat
|
||||
#sonnet: false
|
||||
|
||||
## Use claude-3-5-haiku-20241022 model for the main chat
|
||||
#haiku: false
|
||||
|
||||
## Use gpt-4-0613 model for the main chat
|
||||
#4: false
|
||||
|
||||
@@ -376,6 +379,9 @@ cog.outl("```")
|
||||
## Apply the changes from the given file instead of running the chat (debug)
|
||||
#apply: xxx
|
||||
|
||||
## Apply clipboard contents as edits using the main model's editor format
|
||||
#apply-clipboard-edits: false
|
||||
|
||||
## Always say yes to every confirmation
|
||||
#yes-always: false
|
||||
|
||||
|
||||
@@ -78,6 +78,9 @@ cog.outl("```")
|
||||
## Use claude-3-5-sonnet-20241022 model for the main chat
|
||||
#AIDER_SONNET=
|
||||
|
||||
## Use claude-3-5-haiku-20241022 model for the main chat
|
||||
#AIDER_HAIKU=
|
||||
|
||||
## Use gpt-4-0613 model for the main chat
|
||||
#AIDER_4=
|
||||
|
||||
@@ -348,6 +351,9 @@ cog.outl("```")
|
||||
## Apply the changes from the given file instead of running the chat (debug)
|
||||
#AIDER_APPLY=
|
||||
|
||||
## Apply clipboard contents as edits using the main model's editor format
|
||||
#AIDER_APPLY_CLIPBOARD_EDITS=false
|
||||
|
||||
## Always say yes to every confirmation
|
||||
#AIDER_YES_ALWAYS=
|
||||
|
||||
|
||||
@@ -26,12 +26,12 @@ cog.out(get_md_help())
|
||||
]]]-->
|
||||
```
|
||||
usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
|
||||
[--opus] [--sonnet] [--4] [--4o] [--mini] [--4-turbo]
|
||||
[--35turbo] [--deepseek] [--o1-mini] [--o1-preview]
|
||||
[--list-models] [--openai-api-base] [--openai-api-type]
|
||||
[--openai-api-version] [--openai-api-deployment-id]
|
||||
[--openai-organization-id] [--model-settings-file]
|
||||
[--model-metadata-file]
|
||||
[--opus] [--sonnet] [--haiku] [--4] [--4o] [--mini]
|
||||
[--4-turbo] [--35turbo] [--deepseek] [--o1-mini]
|
||||
[--o1-preview] [--list-models] [--openai-api-base]
|
||||
[--openai-api-type] [--openai-api-version]
|
||||
[--openai-api-deployment-id] [--openai-organization-id]
|
||||
[--model-settings-file] [--model-metadata-file]
|
||||
[--verify-ssl | --no-verify-ssl] [--edit-format]
|
||||
[--architect] [--weak-model] [--editor-model]
|
||||
[--editor-edit-format]
|
||||
@@ -67,9 +67,9 @@ usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
|
||||
[--chat-language] [--version] [--just-check-update]
|
||||
[--check-update | --no-check-update]
|
||||
[--install-main-branch] [--upgrade] [--apply]
|
||||
[--yes-always] [-v] [--show-repo-map] [--show-prompts]
|
||||
[--exit] [--message] [--message-file] [--load]
|
||||
[--encoding] [-c]
|
||||
[--apply-clipboard-edits] [--yes-always] [-v]
|
||||
[--show-repo-map] [--show-prompts] [--exit] [--message]
|
||||
[--message-file] [--load] [--encoding] [-c]
|
||||
[--gui | --no-gui | --browser | --no-browser]
|
||||
[--suggest-shell-commands | --no-suggest-shell-commands]
|
||||
[--fancy-input | --no-fancy-input] [--voice-format]
|
||||
@@ -107,6 +107,10 @@ Environment variable: `AIDER_OPUS`
|
||||
Use claude-3-5-sonnet-20241022 model for the main chat
|
||||
Environment variable: `AIDER_SONNET`
|
||||
|
||||
### `--haiku`
|
||||
Use claude-3-5-haiku-20241022 model for the main chat
|
||||
Environment variable: `AIDER_HAIKU`
|
||||
|
||||
### `--4`
|
||||
Use gpt-4-0613 model for the main chat
|
||||
Environment variable: `AIDER_4`
|
||||
@@ -574,6 +578,11 @@ Aliases:
|
||||
Apply the changes from the given file instead of running the chat (debug)
|
||||
Environment variable: `AIDER_APPLY`
|
||||
|
||||
### `--apply-clipboard-edits`
|
||||
Apply clipboard contents as edits using the main model's editor format
|
||||
Default: False
|
||||
Environment variable: `AIDER_APPLY_CLIPBOARD_EDITS`
|
||||
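For example, a plausible session (the file name here is illustrative): copy an
edit block from another chat or web UI to your clipboard, then run:

```
aider --apply-clipboard-edits app.py
```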
|
||||
### `--yes-always`
|
||||
Always say yes to every confirmation
|
||||
Environment variable: `AIDER_YES_ALWAYS`
|
||||
|
||||
@@ -30,7 +30,7 @@ current chat to build a compact
|
||||
Adding a bunch of files that are mostly irrelevant to the
|
||||
task at hand will often distract or confuse the LLM.
|
||||
The LLM will give worse coding results, and sometimes even fail to correctly edit files.
|
||||
Adding extra files will also increase the token costs on your OpenAI invoice.
|
||||
Adding extra files will also increase your token costs.
|
||||
|
||||
Again, it's usually best to just add the files to the chat that will need to be modified.
|
||||
If you still wish to add lots of files to the chat, you can:
|
||||
@@ -150,7 +150,6 @@ python -m aider
|
||||
|
||||
|
||||
|
||||
|
||||
## Can I change the system prompts that aider uses?
|
||||
|
||||
Aider is set up to support different system prompts and edit formats
|
||||
@@ -191,6 +190,16 @@ You can also refer to the
|
||||
[instructions for installing a development version of aider](https://aider.chat/docs/install/optional.html#install-the-development-version-of-aider).
|
||||
|
||||
|
||||
## How are the "aider wrote xx% of code" stats computed?
|
||||
|
||||
[Aider is tightly integrated with git](/docs/git.html) so all
|
||||
of aider's code changes are committed to the repo with proper attribution.
|
||||
The
|
||||
[stats are computed](https://github.com/Aider-AI/aider/blob/main/scripts/blame.py)
|
||||
by doing something like `git blame` on the repo,
|
||||
and counting up who wrote all the new lines of code in each release.
|
||||
Only lines in source code files are counted, not documentation or prompt files.
|
||||
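As a rough sketch of that computation (not the actual blame.py; the tag name,
file argument, and author strings are illustrative assumptions), the per-author
counts can be gathered with `git blame --line-porcelain`:

```python
import subprocess
from collections import Counter

def blame_counts(fname, tag="v0.63.0"):
    """Count lines per author in fname as of the given release tag."""
    # --line-porcelain emits an "author <name>" record for every line
    out = subprocess.run(
        ["git", "blame", "--line-porcelain", tag, "--", fname],
        capture_output=True, text=True, check=True,
    ).stdout
    # "author " (with the trailing space) skips author-mail/author-time records
    return Counter(
        line[len("author "):]
        for line in out.splitlines()
        if line.startswith("author ")
    )
```

Summing these counters over the source files and dividing the lines attributed
to the "(aider)" author by the total gives the percentage.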
|
||||
## Can I share my aider chat transcript?
|
||||
|
||||
Yes, you can now share aider chat logs in a pretty way.
|
||||
@@ -213,6 +222,15 @@ This will give you a URL like this, which shows the chat history like you'd see
|
||||
https://aider.chat/share/?mdurl=https://gist.github.com/Aider-AI/2087ab8b64034a078c0a209440ac8be0
|
||||
```
|
||||
|
||||
## Can I edit files myself while aider is running?
|
||||
|
||||
Yes. Aider always reads the latest copy of files from the file
|
||||
system when you send each message.
|
||||
|
||||
While you're waiting for aider's reply to complete, it's probably unwise to
|
||||
edit files that you've added to the chat.
|
||||
Your edits and aider's edits might conflict.
|
||||
|
||||
## What is Aider AI LLC?
|
||||
|
||||
Aider AI LLC is the company behind the aider AI coding tool.
|
||||
@@ -222,11 +240,5 @@ under an
|
||||
[Apache 2.0 license](https://github.com/Aider-AI/aider/blob/main/LICENSE.txt).
|
||||
|
||||
|
||||
## Can I edit files myself while aider is running?
|
||||
<div style="height:80vh"></div>
|
||||
|
||||
Yes. Aider always reads the latest copy of files from the file
|
||||
system when you send each message.
|
||||
|
||||
While you're waiting for aider's reply to complete, it's probably unwise to
|
||||
edit files that you've added to the chat.
|
||||
Your edits and aider's edits might conflict.
|
||||
|
||||
@@ -60,13 +60,20 @@ The model also has to successfully apply all its changes to the source file with
|
||||
<script>
|
||||
document.addEventListener('DOMContentLoaded', function () {
|
||||
var ctx = document.getElementById('editChart').getContext('2d');
|
||||
const HIGHTLIGHT_MODEL = 'no no no no';
|
||||
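// Bars whose label contains this string get the red highlight colors below;
// a placeholder that matches no model name leaves every bar the default blue.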
var leaderboardData = {
|
||||
labels: [],
|
||||
datasets: [{
|
||||
label: 'Percent completed correctly',
|
||||
data: [],
|
||||
backgroundColor: 'rgba(54, 162, 235, 0.2)',
|
||||
borderColor: 'rgba(54, 162, 235, 1)',
|
||||
backgroundColor: function(context) {
|
||||
const label = context.chart.data.labels[context.dataIndex] || '';
|
||||
return (label && label.includes(HIGHTLIGHT_MODEL)) ? 'rgba(255, 99, 132, 0.2)' : 'rgba(54, 162, 235, 0.2)';
|
||||
},
|
||||
borderColor: function(context) {
|
||||
const label = context.chart.data.labels[context.dataIndex] || '';
|
||||
return (label && label.includes(HIGHTLIGHT_MODEL)) ? 'rgba(255, 99, 132, 1)' : 'rgba(54, 162, 235, 1)';
|
||||
},
|
||||
borderWidth: 1
|
||||
}]
|
||||
};
|
||||
@@ -117,14 +124,9 @@ The model also has to successfully apply all its changes to the source file with
|
||||
data: leaderboardData,
|
||||
options: {
|
||||
scales: {
|
||||
yAxes: [{
|
||||
scaleLabel: {
|
||||
display: true,
|
||||
},
|
||||
ticks: {
|
||||
beginAtZero: true
|
||||
}
|
||||
}]
|
||||
y: {
|
||||
beginAtZero: true
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
@@ -242,14 +244,9 @@ Therefore, results are available for fewer models.
|
||||
data: leaderboardData,
|
||||
options: {
|
||||
scales: {
|
||||
yAxes: [{
|
||||
scaleLabel: {
|
||||
display: true,
|
||||
},
|
||||
ticks: {
|
||||
beginAtZero: true
|
||||
}
|
||||
}]
|
||||
y: {
|
||||
beginAtZero: true
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
@@ -321,6 +318,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
|
||||
latest_mod_date = max(mod_dates)
|
||||
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
|
||||
]]]-->
|
||||
October 22, 2024.
|
||||
November 11, 2024.
|
||||
<!--[[[end]]]-->
|
||||
</p>
|
||||
|
||||
@@ -95,5 +95,6 @@ cog.out(''.join(lines))
|
||||
- TOGETHERAI_API_KEY
|
||||
- VOLCENGINE_API_KEY
|
||||
- VOYAGE_API_KEY
|
||||
- XAI_API_KEY
|
||||
- XINFERENCE_API_KEY
|
||||
<!--[[[end]]]-->
|
||||
|
||||
@@ -100,7 +100,7 @@ aider --analytics-log filename.jsonl --no-analytics
|
||||
If you have concerns about any of the analytics that aider is collecting
|
||||
or our data practices
|
||||
please contact us by opening a
|
||||
[GitHub Issue](https://github.com/paul-gauthier/aider/issues).
|
||||
[GitHub Issue](https://github.com/aider-ai/aider/issues).
|
||||
|
||||
## Privacy policy
|
||||
|
||||
|
||||
@@ -55,8 +55,10 @@ model_list = "\n".join(f"- {model}" for model in sorted(prefill_models))
|
||||
|
||||
cog.out(model_list)
|
||||
]]]-->
|
||||
- anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
- anthropic.claude-3-5-sonnet-20241022-v2:0
|
||||
- anthropic/claude-3-5-sonnet-20241022
|
||||
- claude-3-5-haiku-20241022
|
||||
- claude-3-5-sonnet-20240620
|
||||
- claude-3-5-sonnet-20241022
|
||||
- claude-3-haiku-20240307
|
||||
@@ -87,7 +89,9 @@ cog.out(model_list)
|
||||
- mistral/open-mixtral-8x7b
|
||||
- mistral/pixtral-12b-2409
|
||||
- openrouter/anthropic/claude-3.5-sonnet
|
||||
- us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
- us.anthropic.claude-3-5-sonnet-20241022-v2:0
|
||||
- vertex_ai/claude-3-5-haiku@20241022
|
||||
- vertex_ai/claude-3-5-sonnet-v2@20241022
|
||||
- vertex_ai/claude-3-5-sonnet@20240620
|
||||
- vertex_ai/claude-3-haiku@20240307
|
||||
|
||||
@@ -95,3 +95,6 @@ io = InputOutput(yes=True)
|
||||
coder = Coder.create(model=model, fnames=fnames, io=io)
|
||||
```
|
||||
|
||||
{: .note }
|
||||
The scripting API is not officially supported or documented and may
|
||||
change without warning.
|
||||
|
||||
@@ -19,7 +19,19 @@ LLM edits that are "almost" correctly formatted.
|
||||
But sometimes the LLM just won't cooperate.
|
||||
In these cases, here are some things you might try.
|
||||
|
||||
## Use a capable model
|
||||
## Don't add too many files
|
||||
|
||||
Many LLMs now have very large context windows,
|
||||
but filling them with irrelevant code or conversation
|
||||
can confuse the model.
|
||||
|
||||
- Don't add too many files to the chat, *just* add the files you think need to be edited.
|
||||
Aider also sends the LLM a [map of your entire git repo](https://aider.chat/docs/repomap.html), so other relevant code will be included automatically.
|
||||
- Use `/drop` to remove files from the chat session which aren't needed for the task at hand. This will reduce distractions and may help the LLM produce properly formatted edits.
|
||||
- Use `/clear` to remove the conversation history, again to help the LLM focus.
|
||||
- Use `/tokens` to see how many tokens you are using for each message.
|
||||
|
||||
## Use a more capable model
|
||||
|
||||
If possible try using GPT-4o, Claude 3.5 Sonnet or Claude 3 Opus,
|
||||
as they are the strongest and most capable models.
|
||||
@@ -33,9 +45,9 @@ so editing errors are probably unavoidable.
|
||||
Local models which have been quantized are even more likely to have problems
|
||||
because they are not capable enough to follow aider's system prompts.
|
||||
|
||||
## Try the whole format
|
||||
## Try the whole edit format
|
||||
|
||||
Run aider with `--edit-format whole` if the model is using a different edit format.
|
||||
Run aider with `--edit-format whole` if it is using a different edit format.
|
||||
You can see which edit format it is using in the announce lines:
|
||||
|
||||
```
|
||||
@@ -43,17 +55,6 @@ Aider v0.50.2-dev
|
||||
Models: claude-3-5-sonnet-20240620 with ♾️ diff edit format
|
||||
```
|
||||
|
||||
## Reduce distractions
|
||||
|
||||
Many LLMs now have very large context windows,
|
||||
but filling them with irrelevant code or conversation
|
||||
can confuse the model.
|
||||
|
||||
- Don't add too many files to the chat, *just* add the files you think need to be edited.
|
||||
Aider also sends the LLM a [map of your entire git repo](https://aider.chat/docs/repomap.html), so other relevant code will be included automatically.
|
||||
- Use `/drop` to remove files from the chat session which aren't needed for the task at hand. This will reduce distractions and may help the LLM produce properly formatted edits.
|
||||
- Use `/clear` to remove the conversation history, again to help the LLM focus.
|
||||
- Use `/tokens` to see how many tokens you are using for each message.
|
||||
|
||||
## More help
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
#
|
||||
aiohappyeyeballs==2.4.3
|
||||
# via aiohttp
|
||||
aiohttp==3.10.10
|
||||
aiohttp==3.11.2
|
||||
# via litellm
|
||||
aiosignal==1.3.1
|
||||
# via aiohttp
|
||||
@@ -86,9 +86,9 @@ importlib-resources==6.4.5
|
||||
# via -r requirements/requirements.in
|
||||
jinja2==3.1.4
|
||||
# via litellm
|
||||
jiter==0.7.0
|
||||
jiter==0.7.1
|
||||
# via openai
|
||||
json5==0.9.25
|
||||
json5==0.9.28
|
||||
# via -r requirements/requirements.in
|
||||
jsonschema==4.23.0
|
||||
# via
|
||||
@@ -96,7 +96,7 @@ jsonschema==4.23.0
|
||||
# litellm
|
||||
jsonschema-specifications==2024.10.1
|
||||
# via jsonschema
|
||||
litellm==1.51.2
|
||||
litellm==1.52.8
|
||||
# via -r requirements/requirements.in
|
||||
markdown-it-py==3.0.0
|
||||
# via rich
|
||||
@@ -120,9 +120,9 @@ numpy==1.26.4
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# scipy
|
||||
openai==1.53.0
|
||||
openai==1.54.4
|
||||
# via litellm
|
||||
packaging==24.1
|
||||
packaging==24.2
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# huggingface-hub
|
||||
@@ -139,7 +139,9 @@ posthog==3.7.0
|
||||
prompt-toolkit==3.0.48
|
||||
# via -r requirements/requirements.in
|
||||
propcache==0.2.0
|
||||
# via yarl
|
||||
# via
|
||||
# aiohttp
|
||||
# yarl
|
||||
psutil==6.1.0
|
||||
# via -r requirements/requirements.in
|
||||
ptyprocess==0.7.0
|
||||
@@ -176,7 +178,7 @@ referencing==0.35.1
|
||||
# via
|
||||
# jsonschema
|
||||
# jsonschema-specifications
|
||||
regex==2024.9.11
|
||||
regex==2024.11.6
|
||||
# via tiktoken
|
||||
requests==2.32.3
|
||||
# via
|
||||
@@ -185,9 +187,9 @@ requests==2.32.3
|
||||
# mixpanel
|
||||
# posthog
|
||||
# tiktoken
|
||||
rich==13.9.3
|
||||
rich==13.9.4
|
||||
# via -r requirements/requirements.in
|
||||
rpds-py==0.20.1
|
||||
rpds-py==0.21.0
|
||||
# via
|
||||
# jsonschema
|
||||
# referencing
|
||||
@@ -217,7 +219,7 @@ tokenizers==0.19.1
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# litellm
|
||||
tqdm==4.66.6
|
||||
tqdm==4.67.0
|
||||
# via
|
||||
# huggingface-hub
|
||||
# openai
|
||||
@@ -241,5 +243,5 @@ wcwidth==0.2.13
|
||||
# via prompt-toolkit
|
||||
yarl==1.17.1
|
||||
# via aiohttp
|
||||
zipp==3.20.2
|
||||
zipp==3.21.0
|
||||
# via importlib-metadata
|
||||
|
||||
@@ -13,7 +13,7 @@ attrs==24.2.0
|
||||
# -c requirements/requirements-help.txt
|
||||
# jsonschema
|
||||
# referencing
|
||||
blinker==1.8.2
|
||||
blinker==1.9.0
|
||||
# via streamlit
|
||||
cachetools==5.5.0
|
||||
# via streamlit
|
||||
@@ -92,7 +92,7 @@ mdurl==0.1.2
|
||||
# -c requirements/../requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# markdown-it-py
|
||||
narwhals==1.12.1
|
||||
narwhals==1.13.5
|
||||
# via altair
|
||||
numpy==1.26.4
|
||||
# via
|
||||
@@ -103,7 +103,7 @@ numpy==1.26.4
|
||||
# pandas
|
||||
# pydeck
|
||||
# streamlit
|
||||
packaging==24.1
|
||||
packaging==24.2
|
||||
# via
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
@@ -157,13 +157,13 @@ requests==2.32.3
|
||||
# -c requirements/requirements-dev.txt
|
||||
# -c requirements/requirements-help.txt
|
||||
# streamlit
|
||||
rich==13.9.3
|
||||
rich==13.9.4
|
||||
# via
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# streamlit
|
||||
rpds-py==0.20.1
|
||||
rpds-py==0.21.0
|
||||
# via
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
@@ -180,7 +180,7 @@ smmap==5.0.1
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
# gitdb
|
||||
streamlit==1.39.0
|
||||
streamlit==1.40.1
|
||||
# via -r requirements/requirements-browser.in
|
||||
tenacity==8.5.0
|
||||
# via
|
||||
|
||||
@@ -32,7 +32,7 @@ codespell==2.3.0
|
||||
# via -r requirements/requirements-dev.in
|
||||
cogapp==3.4.1
|
||||
# via -r requirements/requirements-dev.in
|
||||
contourpy==1.3.0
|
||||
contourpy==1.3.1
|
||||
# via matplotlib
|
||||
cycler==0.12.1
|
||||
# via matplotlib
|
||||
@@ -51,9 +51,9 @@ filelock==3.16.1
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
# virtualenv
|
||||
fonttools==4.54.1
|
||||
fonttools==4.55.0
|
||||
# via matplotlib
|
||||
identify==2.6.1
|
||||
identify==2.6.2
|
||||
# via pre-commit
|
||||
idna==3.10
|
||||
# via
|
||||
@@ -103,7 +103,7 @@ numpy==1.26.4
|
||||
# contourpy
|
||||
# matplotlib
|
||||
# pandas
|
||||
packaging==24.1
|
||||
packaging==24.2
|
||||
# via
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
@@ -164,7 +164,7 @@ requests==2.32.3
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
# sphinx
|
||||
rich==13.9.3
|
||||
rich==13.9.4
|
||||
# via
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
@@ -184,7 +184,7 @@ sphinx==8.1.3
|
||||
# via
|
||||
# sphinx-rtd-theme
|
||||
# sphinxcontrib-jquery
|
||||
sphinx-rtd-theme==3.0.1
|
||||
sphinx-rtd-theme==3.0.2
|
||||
# via lox
|
||||
sphinxcontrib-applehelp==2.0.0
|
||||
# via sphinx
|
||||
@@ -200,7 +200,7 @@ sphinxcontrib-qthelp==2.0.0
|
||||
# via sphinx
|
||||
sphinxcontrib-serializinghtml==2.0.0
|
||||
# via sphinx
|
||||
typer==0.12.5
|
||||
typer==0.13.0
|
||||
# via -r requirements/requirements-dev.in
|
||||
typing-extensions==4.12.2
|
||||
# via
|
||||
@@ -216,7 +216,7 @@ urllib3==2.2.3
|
||||
# requests
|
||||
virtualenv==20.27.1
|
||||
# via pre-commit
|
||||
wheel==0.44.0
|
||||
wheel==0.45.0
|
||||
# via pip-tools
|
||||
|
||||
# The following packages are considered to be unsafe in a requirements file:
|
||||
|
||||
@@ -9,7 +9,7 @@ aiohappyeyeballs==2.4.3
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
# aiohttp
|
||||
aiohttp==3.10.10
|
||||
aiohttp==3.11.2
|
||||
# via
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
@@ -69,6 +69,8 @@ filelock==3.16.1
|
||||
# huggingface-hub
|
||||
# torch
|
||||
# transformers
|
||||
filetype==1.2.0
|
||||
# via llama-index-core
|
||||
frozenlist==1.5.0
|
||||
# via
|
||||
# -c requirements.txt
|
||||
@@ -128,7 +130,7 @@ joblib==1.4.2
|
||||
# via
|
||||
# nltk
|
||||
# scikit-learn
|
||||
llama-index-core==0.11.21
|
||||
llama-index-core==0.11.23
|
||||
# via
|
||||
# -r requirements/requirements-help.in
|
||||
# llama-index-embeddings-huggingface
|
||||
@@ -140,7 +142,7 @@ markupsafe==3.0.2
|
||||
# -c requirements/../requirements.txt
|
||||
# -c requirements/requirements-dev.txt
|
||||
# jinja2
|
||||
marshmallow==3.23.0
|
||||
marshmallow==3.23.1
|
||||
# via dataclasses-json
|
||||
mpmath==1.3.0
|
||||
# via sympy
|
||||
@@ -171,7 +173,7 @@ numpy==1.26.4
|
||||
# scikit-learn
|
||||
# scipy
|
||||
# transformers
|
||||
packaging==24.1
|
||||
packaging==24.2
|
||||
# via
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
@@ -190,6 +192,7 @@ propcache==0.2.0
|
||||
# via
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
# aiohttp
|
||||
# yarl
|
||||
pydantic==2.9.2
|
||||
# via
|
||||
@@ -209,7 +212,7 @@ pyyaml==6.0.2
|
||||
# huggingface-hub
|
||||
# llama-index-core
|
||||
# transformers
|
||||
regex==2024.9.11
|
||||
regex==2024.11.6
|
||||
# via
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
@@ -235,7 +238,7 @@ scipy==1.13.1
|
||||
# -c requirements/../requirements.txt
|
||||
# scikit-learn
|
||||
# sentence-transformers
|
||||
sentence-transformers==3.2.1
|
||||
sentence-transformers==3.3.0
|
||||
# via llama-index-embeddings-huggingface
|
||||
sniffio==1.3.1
|
||||
# via
|
||||
@@ -265,7 +268,7 @@ tokenizers==0.19.1
|
||||
# transformers
|
||||
torch==2.2.2
|
||||
# via sentence-transformers
|
||||
tqdm==4.66.6
|
||||
tqdm==4.67.0
|
||||
# via
|
||||
# -c requirements.txt
|
||||
# -c requirements/../requirements.txt
|
||||
|
||||
@@ -28,6 +28,10 @@ This looks like a duplicate of #{oldest_issue_number}. Please see the comments t
|
||||
|
||||
I'm going to close this issue for now. But please let me know if you think this is actually a distinct issue and I will reopen this issue.""" # noqa
|
||||
|
||||
STALE_COMMENT = """I'm labeling this issue as stale because it has been open for 2 weeks with no activity. If there are no additional comments, it will be closed in 7 days.""" # noqa
|
||||
|
||||
CLOSE_STALE_COMMENT = """I'm closing this issue because it has been stalled for 3 weeks with no activity. Feel free to add a comment here and we can re-open it. Or feel free to file a new issue at any time.""" # noqa
|
||||
|
||||
# GitHub API configuration
|
||||
GITHUB_API_URL = "https://api.github.com"
|
||||
REPO_OWNER = "Aider-AI"
|
||||
@@ -112,22 +116,187 @@ def comment_and_close_duplicate(issue, oldest_issue):
|
||||
print(f" - Commented and closed issue #{issue['number']}")
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Handle duplicate GitHub issues")
|
||||
parser.add_argument(
|
||||
"--yes", action="store_true", help="Automatically close duplicates without prompting"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
def find_unlabeled_with_paul_comments(issues):
|
||||
unlabeled_issues = []
|
||||
for issue in issues:
|
||||
# Skip pull requests
|
||||
if "pull_request" in issue:
|
||||
continue
|
||||
|
||||
if not TOKEN:
|
||||
print("Error: Missing GITHUB_TOKEN environment variable. Please check your .env file.")
|
||||
if not issue["labels"] and issue["state"] == "open":
|
||||
# Get comments for this issue
|
||||
comments_url = (
|
||||
f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}/comments"
|
||||
)
|
||||
response = requests.get(comments_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
comments = response.json()
|
||||
|
||||
# Check if paul-gauthier has commented
|
||||
if any(comment["user"]["login"] == "paul-gauthier" for comment in comments):
|
||||
unlabeled_issues.append(issue)
|
||||
return unlabeled_issues
|
||||
|
||||
|
||||
def handle_unlabeled_issues(all_issues, auto_yes):
|
||||
print("\nFinding unlabeled issues with paul-gauthier comments...")
|
||||
unlabeled_issues = find_unlabeled_with_paul_comments(all_issues)
|
||||
|
||||
if not unlabeled_issues:
|
||||
print("No unlabeled issues with paul-gauthier comments found.")
|
||||
return
|
||||
|
||||
all_issues = get_issues("all")
|
||||
print(f"\nFound {len(unlabeled_issues)} unlabeled issues with paul-gauthier comments:")
|
||||
for issue in unlabeled_issues:
|
||||
print(f" - #{issue['number']}: {issue['title']} {issue['html_url']}")
|
||||
|
||||
if not auto_yes:
|
||||
confirm = input("\nDo you want to add the 'question' label to these issues? (y/n): ")
|
||||
if confirm.lower() != "y":
|
||||
print("Skipping labeling.")
|
||||
return
|
||||
|
||||
print("\nAdding 'question' label to issues...")
|
||||
for issue in unlabeled_issues:
|
||||
url = f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}"
|
||||
response = requests.patch(url, headers=headers, json={"labels": ["question"]})
|
||||
response.raise_for_status()
|
||||
print(f" - Added 'question' label to #{issue['number']}")
|
||||
|
||||
|
||||
def handle_stale_issues(all_issues, auto_yes):
|
||||
print("\nChecking for stale question issues...")
|
||||
|
||||
for issue in all_issues:
|
||||
# Skip if not open, not a question, already stale, or has been reopened
|
||||
if (
|
||||
issue["state"] != "open"
|
||||
or "question" not in [label["name"] for label in issue["labels"]]
|
||||
or "stale" in [label["name"] for label in issue["labels"]]
|
||||
or has_been_reopened(issue["number"])
|
||||
):
|
||||
continue
|
||||
|
||||
# Get latest activity timestamp from issue or its comments
|
||||
latest_activity = datetime.strptime(issue["updated_at"], "%Y-%m-%dT%H:%M:%SZ")
|
||||
|
||||
# Check if issue is stale (no activity for 14 days)
|
||||
days_inactive = (datetime.now() - latest_activity).days
|
||||
if days_inactive >= 14:
|
||||
print(f"\nStale issue found: #{issue['number']}: {issue['title']}\n{issue['html_url']}")
|
||||
print(f" No activity for {days_inactive} days")
|
||||
|
||||
if not auto_yes:
|
||||
confirm = input("Add stale label and comment? (y/n): ")
|
||||
if confirm.lower() != "y":
|
||||
print("Skipping this issue.")
|
||||
continue
|
||||
|
||||
# Add comment
|
||||
comment_url = (
|
||||
f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}/comments"
|
||||
)
|
||||
response = requests.post(comment_url, headers=headers, json={"body": STALE_COMMENT})
|
||||
response.raise_for_status()
|
||||
|
||||
# Add stale label
|
||||
url = f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}"
|
||||
response = requests.patch(url, headers=headers, json={"labels": ["question", "stale"]})
|
||||
response.raise_for_status()
|
||||
|
||||
print(f" Added stale label and comment to #{issue['number']}")
|
||||
|
||||
|
||||
def handle_stale_closing(all_issues, auto_yes):
|
||||
print("\nChecking for issues to close or unstale...")
|
||||
|
||||
for issue in all_issues:
|
||||
# Skip if not open or not stale
|
||||
if issue["state"] != "open" or "stale" not in [label["name"] for label in issue["labels"]]:
|
||||
continue
|
||||
|
||||
# Get the timeline to find when the stale label was last added
|
||||
timeline_url = (
|
||||
f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}/timeline"
|
||||
)
|
||||
response = requests.get(timeline_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
events = response.json()
|
||||
|
||||
# Find the most recent stale label addition
|
||||
stale_events = [
|
||||
event
|
||||
for event in events
|
||||
if event.get("event") == "labeled" and event.get("label", {}).get("name") == "stale"
|
||||
]
|
||||
|
||||
if not stale_events:
|
||||
continue
|
||||
|
||||
latest_stale = datetime.strptime(stale_events[-1]["created_at"], "%Y-%m-%dT%H:%M:%SZ")
|
||||
|
||||
# Get comments since the stale label
|
||||
comments_url = (
|
||||
f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}/comments"
|
||||
)
|
||||
response = requests.get(comments_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
comments = response.json()
|
||||
|
||||
# Check for comments newer than the stale label
|
||||
new_comments = [
|
||||
comment
|
||||
for comment in comments
|
||||
if datetime.strptime(comment["created_at"], "%Y-%m-%dT%H:%M:%SZ") > latest_stale
|
||||
]
|
||||
|
||||
if new_comments:
|
||||
print(f"\nFound new activity on stale issue #{issue['number']}: {issue['title']}")
|
||||
print(f" {len(new_comments)} new comments since stale label")
|
||||
|
||||
if not auto_yes:
|
||||
confirm = input("Remove stale label? (y/n): ")
|
||||
if confirm.lower() != "y":
|
||||
print("Skipping this issue.")
|
||||
continue
|
||||
|
||||
# Remove stale label but keep question label
|
||||
url = f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}"
|
||||
response = requests.patch(url, headers=headers, json={"labels": ["question"]})
|
||||
response.raise_for_status()
|
||||
print(f" Removed stale label from #{issue['number']}")
|
||||
else:
|
||||
# Check if it's been 7 days since stale label
|
||||
days_stale = (datetime.now() - latest_stale).days
|
||||
if days_stale >= 7:
|
||||
print(f"\nStale issue ready for closing #{issue['number']}: {issue['title']}")
|
||||
print(f" No activity for {days_stale} days since stale label")
|
||||
|
||||
if not auto_yes:
|
||||
confirm = input("Close this issue? (y/n): ")
|
||||
if confirm.lower() != "y":
|
||||
print("Skipping this issue.")
|
||||
continue
|
||||
|
||||
# Add closing comment
|
||||
comment_url = f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}/comments"
|
||||
response = requests.post(
|
||||
comment_url, headers=headers, json={"body": CLOSE_STALE_COMMENT}
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
# Close the issue
|
||||
url = f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}"
|
||||
response = requests.patch(url, headers=headers, json={"state": "closed"})
|
||||
response.raise_for_status()
|
||||
print(f" Closed issue #{issue['number']}")
|
||||
|
||||
|
||||
def handle_duplicate_issues(all_issues, auto_yes):
|
||||
open_issues = [issue for issue in all_issues if issue["state"] == "open"]
|
||||
grouped_open_issues = group_issues_by_subject(open_issues)
|
||||
|
||||
print("Analyzing issues (skipping reopened issues)...")
|
||||
print("Looking for duplicate issues (skipping reopened issues)...")
|
||||
for subject, issues in grouped_open_issues.items():
|
||||
oldest_issue = find_oldest_issue(subject, all_issues)
|
||||
if not oldest_issue:
|
||||
@@ -149,14 +318,12 @@ def main():
|
||||
f" {oldest_issue['html_url']} ({oldest_issue['state']})"
|
||||
)
|
||||
|
||||
if not args.yes:
|
||||
# Confirmation prompt
|
||||
if not auto_yes:
|
||||
confirm = input("Do you want to comment and close duplicate issues? (y/n): ")
|
||||
if confirm.lower() != "y":
|
||||
print("Skipping this group of issues.")
|
||||
continue
|
||||
|
||||
# Comment and close duplicate issues
|
||||
for issue in issues:
|
||||
if issue["number"] != oldest_issue["number"]:
|
||||
comment_and_close_duplicate(issue, oldest_issue)
|
||||
@@ -165,5 +332,24 @@ def main():
|
||||
print(f"Oldest issue #{oldest_issue['number']} left open")
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Handle duplicate GitHub issues")
|
||||
parser.add_argument(
|
||||
"--yes", action="store_true", help="Automatically close duplicates without prompting"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
if not TOKEN:
|
||||
print("Error: Missing GITHUB_TOKEN environment variable. Please check your .env file.")
|
||||
return
|
||||
|
||||
all_issues = get_issues("all")
|
||||
|
||||
handle_unlabeled_issues(all_issues, args.yes)
|
||||
handle_stale_issues(all_issues, args.yes)
|
||||
handle_stale_closing(all_issues, args.yes)
|
||||
handle_duplicate_issues(all_issues, args.yes)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
@@ -17,6 +17,8 @@ from aider.utils import GitTemporaryDirectory
|
||||
class TestCoder(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.GPT35 = Model("gpt-3.5-turbo")
|
||||
self.webbrowser_patcher = patch("aider.io.webbrowser.open")
|
||||
self.mock_webbrowser = self.webbrowser_patcher.start()
|
||||
|
||||
def test_allowed_to_edit(self):
|
||||
with GitTemporaryDirectory():
|
||||
|
||||
@@ -1210,6 +1210,26 @@ class TestCommands(TestCase):
|
||||
del commands
|
||||
del repo
|
||||
|
||||
def test_cmd_add_gitignored_file(self):
|
||||
with GitTemporaryDirectory():
|
||||
# Create a .gitignore file
|
||||
gitignore = Path(".gitignore")
|
||||
gitignore.write_text("*.ignored\n")
|
||||
|
||||
# Create a file that matches the gitignore pattern
|
||||
ignored_file = Path("test.ignored")
|
||||
ignored_file.write_text("This should be ignored")
|
||||
|
||||
io = InputOutput(pretty=False, fancy_input=False, yes=False)
|
||||
coder = Coder.create(self.GPT35, None, io)
|
||||
commands = Commands(io, coder)
|
||||
|
||||
# Try to add the ignored file
|
||||
commands.cmd_add(str(ignored_file))
|
||||
|
||||
# Verify the file was not added
|
||||
self.assertEqual(len(coder.abs_fnames), 0)
|
||||
|
||||
def test_cmd_add_aiderignored_file(self):
|
||||
with GitTemporaryDirectory():
|
||||
repo = git.Repo()
|
||||
|
||||
@@ -10,6 +10,7 @@ from aider.coders import editblock_coder as eb
|
||||
from aider.dump import dump # noqa: F401
|
||||
from aider.io import InputOutput
|
||||
from aider.models import Model
|
||||
from aider.utils import ChdirTemporaryDirectory
|
||||
|
||||
|
||||
class TestUtils(unittest.TestCase):
|
||||
@@ -341,6 +342,46 @@ These changes replace the `subprocess.run` patches with `subprocess.check_output
|
||||
result = eb.replace_most_similar_chunk(whole, part, replace)
|
||||
self.assertEqual(result, expected_output)
|
||||
|
||||
def test_create_new_file_with_other_file_in_chat(self):
|
||||
# https://github.com/Aider-AI/aider/issues/2258
|
||||
with ChdirTemporaryDirectory():
|
||||
# Create a few temporary files
|
||||
file1 = "file.txt"
|
||||
|
||||
with open(file1, "w", encoding="utf-8") as f:
|
||||
f.write("one\ntwo\nthree\n")
|
||||
|
||||
files = [file1]
|
||||
|
||||
# Initialize the Coder object with the mocked IO and mocked repo
|
||||
coder = Coder.create(
|
||||
self.GPT35, "diff", use_git=False, io=InputOutput(yes=True), fnames=files
|
||||
)
|
||||
|
||||
def mock_send(*args, **kwargs):
|
||||
coder.partial_response_content = f"""
|
||||
Do this:
|
||||
|
||||
newfile.txt
|
||||
<<<<<<< SEARCH
|
||||
=======
|
||||
creating a new file
|
||||
>>>>>>> REPLACE
|
||||
|
||||
"""
|
||||
coder.partial_response_function_call = dict()
|
||||
return []
|
||||
|
||||
coder.send = mock_send
|
||||
|
||||
coder.run(with_message="hi")
|
||||
|
||||
content = Path(file1).read_text(encoding="utf-8")
|
||||
self.assertEqual(content, "one\ntwo\nthree\n")
|
||||
|
||||
content = Path("newfile.txt").read_text(encoding="utf-8")
|
||||
self.assertEqual(content, "creating a new file\n")
|
||||
|
||||
def test_full_edit(self):
|
||||
# Create a few temporary files
|
||||
_, file1 = tempfile.mkstemp()
|
||||
|
||||
65
tests/basic/test_exceptions.py
Normal file
@@ -0,0 +1,65 @@
+from aider.exceptions import ExInfo, LiteLLMExceptions
+
+
+def test_litellm_exceptions_load():
+    """Test that LiteLLMExceptions loads without errors"""
+    ex = LiteLLMExceptions()
+    assert len(ex.exceptions) > 0
+
+
+def test_exceptions_tuple():
+    """Test that exceptions_tuple returns a non-empty tuple"""
+    ex = LiteLLMExceptions()
+    assert isinstance(ex.exceptions_tuple(), tuple)
+    assert len(ex.exceptions_tuple()) > 0
+
+
+def test_get_ex_info():
+    """Test get_ex_info returns correct ExInfo"""
+    ex = LiteLLMExceptions()
+
+    # Test with a known exception type
+    from litellm import AuthenticationError
+
+    auth_error = AuthenticationError(
+        message="Invalid API key", llm_provider="openai", model="gpt-4"
+    )
+    ex_info = ex.get_ex_info(auth_error)
+    assert isinstance(ex_info, ExInfo)
+    assert ex_info.name == "AuthenticationError"
+    assert ex_info.retry is False
+    assert "API key" in ex_info.description
+
+    # Test with unknown exception type
+    class UnknownError(Exception):
+        pass
+
+    unknown = UnknownError()
+    ex_info = ex.get_ex_info(unknown)
+    assert isinstance(ex_info, ExInfo)
+    assert ex_info.name is None
+    assert ex_info.retry is None
+    assert ex_info.description is None
+
+
+def test_rate_limit_error():
+    """Test specific handling of RateLimitError"""
+    ex = LiteLLMExceptions()
+    from litellm import RateLimitError
+
+    rate_error = RateLimitError(message="Rate limit exceeded", llm_provider="openai", model="gpt-4")
+    ex_info = ex.get_ex_info(rate_error)
+    assert ex_info.retry is True
+    assert "rate limited" in ex_info.description.lower()
+
+
+def test_context_window_error():
+    """Test specific handling of ContextWindowExceededError"""
+    ex = LiteLLMExceptions()
+    from litellm import ContextWindowExceededError
+
+    ctx_error = ContextWindowExceededError(
+        message="Context length exceeded", model="gpt-4", llm_provider="openai"
+    )
+    ex_info = ex.get_ex_info(ctx_error)
+    assert ex_info.retry is False
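Note: together these tests sketch the intended use of LiteLLMExceptions as a retry
policy table. Below is a minimal usage sketch (the call site and the call_llm helper
are hypothetical, not code from aider itself), assuming only the behavior the tests
above assert: exceptions_tuple() yields the catchable exception classes, and
get_ex_info() maps an instance to an ExInfo with name, retry, and description fields.

    from aider.exceptions import LiteLLMExceptions

    litellm_ex = LiteLLMExceptions()

    try:
        response = call_llm()  # hypothetical helper that invokes litellm
    except litellm_ex.exceptions_tuple() as err:
        ex_info = litellm_ex.get_ex_info(err)
        if ex_info.retry:
            pass  # back off and retry the request
        else:
            print(ex_info.description or str(err))
            raise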
@@ -32,7 +32,7 @@ class TestMain(TestCase):
         os.environ["HOME"] = self.homedir_obj.name
         self.input_patcher = patch("builtins.input", return_value=None)
         self.mock_input = self.input_patcher.start()
-        self.webbrowser_patcher = patch("webbrowser.open")
+        self.webbrowser_patcher = patch("aider.io.webbrowser.open")
         self.mock_webbrowser = self.webbrowser_patcher.start()

     def tearDown(self):
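Note: the setUp change above narrows the patch target from the stdlib-wide name to
the reference reachable through aider.io, the module that actually opens the url.
This tracks a common unittest.mock guideline: patch a name where the code under test
looks it up. A small generic illustration (mypkg is hypothetical, not aider code):

    from unittest.mock import patch

    # If mypkg.io does `import webbrowser` and calls webbrowser.open(url),
    # patching the reference reachable through mypkg.io guarantees the mock
    # is the object that call resolves to:
    with patch("mypkg.io.webbrowser.open") as mock_open:
        ...  # exercise code in mypkg.io that opens a url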
@@ -127,8 +127,9 @@ def test_git_index_version_greater_than_2(mock_browser, create_repo, mock_io):
         "You may be able to convert your repo: git update-index --index-version=2"
     )
     mock_io.tool_output.assert_any_call("Or run aider --no-git to proceed without using git.")
-    mock_io.confirm_ask.assert_any_call(
-        "Open documentation url for more info?", subject=urls.git_index_version
+    mock_io.offer_url.assert_any_call(
+        urls.git_index_version,
+        "Open documentation url for more info?",
     )
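Note: the assertion change above tracks an API consolidation in aider's io layer.
The signatures here are inferred from the call sites in this diff, not from the io
source: what used to be a confirm_ask() prompt carrying the url as a subject is now
a single offer_url(url, prompt) call.

    # before: prompt text first, url passed as the subject keyword
    io.confirm_ask("Open documentation url for more info?", subject=urls.git_index_version)

    # after: url first, prompt text second
    io.offer_url(urls.git_index_version, "Open documentation url for more info?")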
@@ -1,10 +1,9 @@
 import unittest
 from unittest.mock import MagicMock, patch

-import httpx
-
+from aider.exceptions import LiteLLMExceptions
 from aider.llm import litellm
-from aider.sendchat import retry_exceptions, simple_send_with_retries
+from aider.sendchat import send_completion, simple_send_with_retries


 class PrintCalled(Exception):

@@ -12,9 +11,13 @@ class PrintCalled(Exception):


 class TestSendChat(unittest.TestCase):
-    def test_retry_exceptions(self):
-        """Test that retry_exceptions() can be called without raising errors"""
-        retry_exceptions()  # Should not raise any exceptions
+    def setUp(self):
+        self.mock_messages = [{"role": "user", "content": "Hello"}]
+        self.mock_model = "gpt-4"
+
+    def test_litellm_exceptions(self):
+        litellm_ex = LiteLLMExceptions()
+        litellm_ex._load(strict=True)

     @patch("litellm.completion")
     @patch("builtins.print")

@@ -24,7 +27,7 @@ class TestSendChat(unittest.TestCase):

         # Set up the mock to raise
         mock_completion.side_effect = [
-            litellm.exceptions.RateLimitError(
+            litellm.RateLimitError(
                 "rate limit exceeded",
                 response=mock,
                 llm_provider="llm_provider",

@@ -35,17 +38,56 @@ class TestSendChat(unittest.TestCase):
         # Call the simple_send_with_retries method
         simple_send_with_retries("model", ["message"])
-        assert mock_print.call_count == 2
+        assert mock_print.call_count == 3
+
+    @patch("litellm.completion")
+    def test_send_completion_basic(self, mock_completion):
+        # Setup mock response
+        mock_response = MagicMock()
+        mock_completion.return_value = mock_response
+
+        # Test basic send_completion
+        hash_obj, response = send_completion(
+            self.mock_model, self.mock_messages, functions=None, stream=False
+        )
+
+        assert response == mock_response
+        mock_completion.assert_called_once()
+
+    @patch("litellm.completion")
+    def test_send_completion_with_functions(self, mock_completion):
+        mock_function = {"name": "test_function", "parameters": {"type": "object"}}
+
+        hash_obj, response = send_completion(
+            self.mock_model, self.mock_messages, functions=[mock_function], stream=False
+        )
+
+        # Verify function was properly included in tools
+        called_kwargs = mock_completion.call_args.kwargs
+        assert "tools" in called_kwargs
+        assert called_kwargs["tools"][0]["function"] == mock_function
+
+    @patch("litellm.completion")
+    def test_simple_send_attribute_error(self, mock_completion):
+        # Setup mock to raise AttributeError
+        mock_completion.return_value = MagicMock()
+        mock_completion.return_value.choices = None
+
+        # Should return None on AttributeError
+        result = simple_send_with_retries(self.mock_model, self.mock_messages)
+        assert result is None

     @patch("litellm.completion")
     @patch("builtins.print")
-    def test_simple_send_with_retries_connection_error(self, mock_print, mock_completion):
-        # Set up the mock to raise
-        mock_completion.side_effect = [
-            httpx.ConnectError("Connection error"),
-            None,
-        ]
+    def test_simple_send_non_retryable_error(self, mock_print, mock_completion):
+        # Test with an error that shouldn't trigger retries
+        mock = MagicMock()
+        mock.status_code = 400

-        # Call the simple_send_with_retries method
-        simple_send_with_retries("model", ["message"])
-        assert mock_print.call_count == 2
+        mock_completion.side_effect = litellm.NotFoundError(
+            message="Invalid request", llm_provider="test_provider", model="test_model"
+        )
+
+        result = simple_send_with_retries(self.mock_model, self.mock_messages)
+        assert result is None
+        assert mock_print.call_count == 1
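Note: the new tests above pin down the send_completion surface: it returns a
(hash, response) pair and repackages a `functions` list into litellm's `tools`
argument. A minimal standalone sketch of the same calls, assuming only what the
tests assert (the model name and messages are arbitrary test values):

    from unittest.mock import MagicMock, patch

    from aider.sendchat import send_completion

    with patch("litellm.completion") as mock_completion:
        mock_completion.return_value = MagicMock()
        hash_obj, response = send_completion(
            "gpt-4",
            [{"role": "user", "content": "Hello"}],
            functions=None,
            stream=False,
        )
        # Inspect the keyword arguments send_completion forwarded to litellm
        print(mock_completion.call_args.kwargs.keys())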
@@ -44,7 +44,7 @@ class TestScrape(unittest.TestCase):
         self.commands.io.tool_error = mock_print_error

         # Run the cmd_web command
-        result = self.commands.cmd_web("https://example.com")
+        result = self.commands.cmd_web("https://example.com", return_content=True)

         # Assert that the result contains some content
         self.assertIsNotNone(result)