C64 Basic AI coding assistant #117

Merged
merged 21 commits on Sep 29, 2024
Changes from 1 commit
Commits (21)
a5a3873
Add DebugInfo data per System.
highbyte Sep 8, 2024
464e9b8
Make SadConsole Debug Info panel general (non-system specific).
highbyte Sep 8, 2024
ffabc62
WIP: C64 Basic coding assistant with OpenAI integration.
highbyte Sep 10, 2024
3f3edbe
WIP: C64 Basic coding assistant with OpenAI integration.
highbyte Sep 10, 2024
c0197bc
WIP: Improving C64 Basic Assistant to consider entire Basic program (…
highbyte Sep 11, 2024
4fd728b
WIP: C64 Coding Assistant integration in WASM UI.
highbyte Sep 12, 2024
04d175d
Merge from master
highbyte Sep 12, 2024
968ed16
WASM UI startup timing issue fixed between OnInitializedAsync and OnA…
highbyte Sep 12, 2024
0d92e48
Move stream helper method to new class.
highbyte Sep 12, 2024
3ce186d
SadConsole UI for toggling C64 Basic AI coding assistant
highbyte Sep 13, 2024
0cae65a
Create new library Highbyte.DotNet6502.AI that now contains shared Op…
highbyte Sep 13, 2024
6fc9bee
WIP: Better SadConsole implementation for C64 code assistant AI. Cust…
highbyte Sep 20, 2024
dbee6c5
Comment on how to get the game Elite (1984) working in C64 emulator.
highbyte Sep 20, 2024
87d5125
Add api key to custom AI code completion endpoint.
highbyte Sep 25, 2024
00bf848
Improve C64 AI coding assistant
highbyte Sep 26, 2024
7e5f0a2
Cleanup VSCode launch.json and task.json
highbyte Sep 26, 2024
a293c0d
Fix layout
highbyte Sep 27, 2024
451438e
Fix layout
highbyte Sep 27, 2024
a0d65a1
WIP: move C64 ROM config to not be specific for each model config.
highbyte Sep 28, 2024
a35cc02
Cleanup code
highbyte Sep 28, 2024
57cb6a8
Fix broken tests
highbyte Sep 29, 2024
WIP: C64 Coding Assistant integration in WASM UI.
highbyte committed Sep 12, 2024
commit 4fd728bd9f45cff11b1063f32302aaf0bb9d1666
@@ -86,7 +86,7 @@ NAudioAudioHandlerContext audioHandlerContext
 var c64 = (C64)system;

 var renderer = new C64SadConsoleRenderer(c64, renderContext);
-var inputHandler = new C64SadConsoleInputHandler(c64, inputHandlerContext, _loggerFactory, GetCodeCompletion);
+var inputHandler = new C64SadConsoleInputHandler(c64, inputHandlerContext, _loggerFactory, GetCodeCompletionAsync);
 var audioHandler = new C64NAudioAudioHandler(c64, audioHandlerContext, _loggerFactory);

 return new SystemRunner(c64, renderer, inputHandler, audioHandler);
@@ -0,0 +1,27 @@
// Based on https://github.com/dotnet/smartcomponents

namespace Highbyte.DotNet6502.App.WASM.CodingAssistant.Inference;

public class ChatParameters
{
    public IList<ChatMessage>? Messages { get; set; }
    public float? Temperature { get; set; }
    public float? TopP { get; set; }
    public int? MaxTokens { get; set; }
    public float? FrequencyPenalty { get; set; }
    public float? PresencePenalty { get; set; }
    public IList<string>? StopSequences { get; set; }
}

public class ChatMessage(ChatMessageRole role, string text)
{
    public ChatMessageRole Role => role;
    public string Text => text;
}

public enum ChatMessageRole
{
    System,
    User,
    Assistant,
}
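For context (not part of the commit), a minimal sketch of how these DTOs compose a completion request; the message text and values below are illustrative placeholders:

// Sketch only: building a request by hand with placeholder values.
var parameters = new ChatParameters
{
    Messages =
    [
        new(ChatMessageRole.System, "You are a C64 Basic code completion assistant."),
        new(ChatMessageRole.User, "USER_TEXT: 10 PRINT^^^"),
    ],
    Temperature = 0,
    MaxTokens = 400,
    StopSequences = ["END_INSERTION"],
};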
@@ -0,0 +1,10 @@
// Based on https://github.com/dotnet/smartcomponents

namespace Highbyte.DotNet6502.App.WASM.CodingAssistant.Inference;

public struct CodeCompletionConfig
{
    public string? Parameters { get; set; }
    public string? UserRole { get; set; }
    //public string[]? UserPhrases { get; set; }
}
@@ -0,0 +1,128 @@
// Based on https://github.com/dotnet/smartcomponents
using System.Text;

namespace Highbyte.DotNet6502.App.WASM.CodingAssistant.Inference;

public class CodeCompletionInference
{
    public virtual ChatParameters BuildPrompt(CodeCompletionConfig config, string textBefore, string textAfter)
    {
        var systemMessageBuilder = new StringBuilder();
// systemMessageBuilder.Append(@"Predict what text the user in the given ROLE would insert at the cursor position indicated by ^^^.
//Only give predictions for which you have an EXTREMELY high confidence that the user would insert that EXACT text.
//Do not make up new information. If you're not sure, just reply with NO_PREDICTION.

//RULES:
//1. Reply with OK:, then in square brackets the predicted text, then END_INSERTION, and no other output.
//2. When a specific value or quantity cannot be inferred and would need to be provided, use the word NEED_INFO.
//3. If there isn't enough information to predict any words that the user would type next, just reply with the word NO_PREDICTION.
//4. NEVER invent new information. If you can't be sure what the user is about to type, ALWAYS stop the prediction with END_INSERTION.");

systemMessageBuilder.Append(@"You are a Code completion AI assistant who responds exclusively using Commodore 64 Basic source code.
Predict what text the user in would insert at the cursor position indicated by ^^^.
Only give predictions for which you have an EXTREMELY high confidence that the user would insert that EXACT text.
Do not make up new information. If you're not sure, just reply with NO_PREDICTION.

RULES:
1. Reply with OK:,then in square brackets (with not preceeding space) the predicted text, then END_INSERTION, and no other output.
2. If there isn't enough information to predict any words that the user would type next, just reply with the word NO_PREDICTION.
3. NEVER invent new information. If you can't be sure what the user is about to type, ALWAYS stop the prediction with END_INSERTION.");


//if (config.UserPhrases is { Length: > 0 } stockPhrases)
//{
// systemMessageBuilder.Append("\nAlways try to use variations on the following phrases as part of the predictions:\n");
// foreach (var phrase in stockPhrases)
// {
// systemMessageBuilder.AppendFormat("- {0}\n", phrase);
// }
//}

        List<ChatMessage> messages =
        [
            new(ChatMessageRole.System, systemMessageBuilder.ToString()),

// new(ChatMessageRole.User, @"ROLE: Family member sending a text
//USER_TEXT: Hey, it's a nice day - the weather is ^^^"),
// new(ChatMessageRole.Assistant, @"OK:[great!]END_INSERTION"),

// new(ChatMessageRole.User, @"ROLE: Customer service assistant
//USER_TEXT: You can find more information on^^^

////Alternatively, phone us."),
// new(ChatMessageRole.Assistant, @"OK:[ our website at NEED_INFO]END_INSERTION"),

// new(ChatMessageRole.User, @"ROLE: Casual
//USER_TEXT: Oh I see!

//Well sure thing, we can"),
// new(ChatMessageRole.Assistant, @"OK:[ help you out with that!]END_INSERTION"),

// new(ChatMessageRole.User, @"ROLE: Storyteller
//USER_TEXT: Sir Digby Chicken Caesar, also know^^^"),
// new(ChatMessageRole.Assistant, @"OK:[n as NEED_INFO]END_INSERTION"),

// new(ChatMessageRole.User, @"ROLE: Customer support agent
//USER_TEXT: Goodbye for now.^^^"),
// new(ChatMessageRole.Assistant, @"NO_PREDICTION END_INSERTION"),

// new(ChatMessageRole.User, @"ROLE: Pirate
//USER_TEXT: Have you found^^^"),
// new(ChatMessageRole.Assistant, @"OK:[ the treasure, me hearties?]END_INSERTION"),

// new(ChatMessageRole.User, @$"ROLE: {config.UserRole}
//USER_TEXT: {textBefore}^^^{textAfter}"),

// ];

// new(ChatMessageRole.User, @$"ROLE: {config.UserRole}
//USER_TEXT: {textBefore}^^^{textAfter}"),



//new(ChatMessageRole.User, @$"USER_TEXT: 10 print"),
//new(ChatMessageRole.Assistant, @"OK:[""C64 rules!!""]END_INSERTION"),

new(ChatMessageRole.User, @$"USER_TEXT: {textBefore}^^^{textAfter}")
];


return new ChatParameters
{
Messages = messages,
Temperature = 0,
MaxTokens = 400,
StopSequences = ["END_INSERTION", "NEED_INFO"],
FrequencyPenalty = 0,
PresencePenalty = 0,
};
}

    public virtual async Task<string> GetInsertionSuggestionAsync(IInferenceBackend inference, CodeCompletionConfig config, string textBefore, string textAfter)
    {
        var chatOptions = BuildPrompt(config, textBefore, textAfter);
        var response = await inference.GetChatResponseAsync(chatOptions);
        if (response.Length > 5 && response.StartsWith("OK:[", StringComparison.Ordinal))
        {
            // Avoid returning multiple sentences, since the later ones are likely to invent a new train of thought.
            var trimAfter = response.IndexOfAny(['.', '?', '!']);
            if (trimAfter > 0 && response.Length > trimAfter + 1 && response[trimAfter + 1] == ' ')
                response = response.Substring(0, trimAfter + 1);

            // Leave it up to the frontend code to decide whether to add a trailing space.
            var trimmedResponse = response.Substring(4).TrimEnd(']', ' ');

            // Don't have a leading space on the suggestion if there's already a space right
            // before the cursor. The language model normally gets this right anyway (distinguishing
            // between starting a new word vs continuing a partly-typed one), but sometimes it adds
            // an unnecessary extra space.
            if (textBefore.Length > 0 && textBefore[textBefore.Length - 1] == ' ')
                trimmedResponse = trimmedResponse.TrimStart(' ');

            return trimmedResponse;
        }

        return string.Empty;
    }
}
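To illustrate the trimming logic above (a sketch, not code from the commit): END_INSERTION is registered as a stop sequence, so it normally never appears in the raw reply. Given a typical reply, the method strips the OK:[ prefix and the closing bracket, and drops a leading space when the text before the cursor already ends with one:

// Walkthrough of the parsing in GetInsertionSuggestionAsync:
var response = "OK:[ GOTO 10]";
var trimmed = response.Substring(4).TrimEnd(']', ' ');   // " GOTO 10"
// If textBefore is "20 " (already ends with a space), the leading space is
// trimmed as well, so the suggestion surfaced to the UI becomes "GOTO 10".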
@@ -0,0 +1,8 @@
// Based on https://github.com/dotnet/smartcomponents

namespace Highbyte.DotNet6502.App.WASM.CodingAssistant.Inference;

public interface IInferenceBackend
{
    Task<string> GetChatResponseAsync(ChatParameters options);
}
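Because the backend abstraction is this small, the completion pipeline can be exercised without network access. A minimal sketch, assuming a hypothetical CannedInferenceBackend test double (not part of the commit):

// Hypothetical test double that returns a fixed model reply.
public class CannedInferenceBackend : IInferenceBackend
{
    public Task<string> GetChatResponseAsync(ChatParameters options)
        => Task.FromResult("OK:[ GOTO 10]");
}

// Usage: drives CodeCompletionInference end-to-end without a real model.
// var suggestion = await new CodeCompletionInference().GetInsertionSuggestionAsync(
//     new CannedInferenceBackend(), new CodeCompletionConfig(), textBefore: "20 ", textAfter: "");
// suggestion == "GOTO 10"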
@@ -0,0 +1,51 @@
// Based on https://github.com/dotnet/smartcomponents

using Microsoft.Extensions.Configuration;

namespace Highbyte.DotNet6502.App.WASM.CodingAssistant.Inference.OpenAI;

public class ApiConfig
{
    public string? ApiKey { get; set; }
    public string? DeploymentName { get; set; }
    public Uri? Endpoint { get; set; }
    public bool SelfHosted { get; set; }

    public const string CONFIG_SECTION = "OpenAI";

    public ApiConfig()
    {
    }

    public ApiConfig(IConfiguration config)
    {
        var configSection = config.GetRequiredSection(CONFIG_SECTION);

        SelfHosted = configSection.GetValue<bool?>("SelfHosted") ?? false;

        if (SelfHosted)
        {
            Endpoint = configSection.GetValue<Uri>("Endpoint")
                ?? throw new InvalidOperationException($"Missing required configuration value: {CONFIG_SECTION}:Endpoint. This is required for SelfHosted inference.");

            // Ollama uses this, but other self-hosted backends might not, so it's optional.
            DeploymentName = configSection.GetValue<string>("DeploymentName");

            // Ollama doesn't use this, but other self-hosted backends might do, so it's optional.
            ApiKey = configSection.GetValue<string>("ApiKey");
        }
        else
        {
            // If set, we assume Azure OpenAI. If not, we assume OpenAI.
            Endpoint = configSection.GetValue<Uri>("Endpoint");

            // For Azure OpenAI, it's your deployment name. For OpenAI, it's the model name.
            DeploymentName = configSection.GetValue<string>("DeploymentName")
                ?? throw new InvalidOperationException($"Missing required configuration value: {CONFIG_SECTION}:DeploymentName");

            ApiKey = configSection.GetValue<string>("ApiKey")
                ?? throw new InvalidOperationException($"Missing required configuration value: {CONFIG_SECTION}:ApiKey");
        }
    }
}
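As a sketch of the two configuration shapes the constructor accepts (all values below are placeholders; the in-memory provider just stands in for appsettings.json):

// Hosted OpenAI: DeploymentName is the model name, Endpoint is omitted.
var config = new ConfigurationBuilder()
    .AddInMemoryCollection(new Dictionary<string, string?>
    {
        ["OpenAI:DeploymentName"] = "model-name-placeholder",
        ["OpenAI:ApiKey"] = "api-key-placeholder",
    })
    .Build();
var apiConfig = new ApiConfig(config);

// Self-hosted (e.g. Ollama, which typically listens on port 11434):
// ["OpenAI:SelfHosted"] = "true", ["OpenAI:Endpoint"] = "http://localhost:11434/"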
@@ -0,0 +1,86 @@
// Based on https://github.com/dotnet/smartcomponents

using System;
using System.Linq;
using System.Threading.Tasks;
using Azure;
using Azure.AI.OpenAI;
using Microsoft.Extensions.Configuration;

namespace Highbyte.DotNet6502.App.WASM.CodingAssistant.Inference.OpenAI;

public class OpenAIInferenceBackend(ApiConfig apiConfig)
    : IInferenceBackend
{
    public OpenAIInferenceBackend(IConfiguration configuration) : this(new ApiConfig(configuration))
    {
    }

    public async Task<string> GetChatResponseAsync(ChatParameters options)
    {
#if DEBUG
        if (ResponseCache.TryGetCachedResponse(options, out var cachedResponse))
            return cachedResponse!;
#endif

        var client = CreateClient(apiConfig);
        var chatCompletionsOptions = new ChatCompletionsOptions
        {
            DeploymentName = apiConfig.DeploymentName,
            Temperature = options.Temperature ?? 0f,
            NucleusSamplingFactor = options.TopP ?? 1,
            MaxTokens = options.MaxTokens ?? 200,
            FrequencyPenalty = options.FrequencyPenalty ?? 0,
            PresencePenalty = options.PresencePenalty ?? 0,
        };

        foreach (var message in options.Messages ?? Enumerable.Empty<ChatMessage>())
        {
            chatCompletionsOptions.Messages.Add(message.Role switch
            {
                ChatMessageRole.System => new ChatRequestSystemMessage(message.Text),
                ChatMessageRole.User => new ChatRequestUserMessage(message.Text),
                ChatMessageRole.Assistant => new ChatRequestAssistantMessage(message.Text),
                _ => throw new InvalidOperationException($"Unknown chat message role: {message.Role}")
            });
        }

        if (options.StopSequences is { } stopSequences)
        {
            foreach (var stopSequence in stopSequences)
            {
                chatCompletionsOptions.StopSequences.Add(stopSequence);
            }
        }

        var completionsResponse = await client.GetChatCompletionsAsync(chatCompletionsOptions);

        var response = completionsResponse.Value.Choices.FirstOrDefault()?.Message.Content ?? string.Empty;

#if DEBUG
        ResponseCache.SetCachedResponse(options, response);
#endif

        return response;
    }

    private static OpenAIClient CreateClient(ApiConfig apiConfig)
    {
        if (apiConfig.SelfHosted)
        {
            var transport = new SelfHostedLlmTransport(apiConfig.Endpoint!);
            return new OpenAIClient(apiConfig.ApiKey, new() { Transport = transport });
        }
        else if (apiConfig.Endpoint is null)
        {
            // OpenAI
            return new OpenAIClient(apiConfig.ApiKey);
        }
        else
        {
            // Azure OpenAI
            return new OpenAIClient(
                apiConfig.Endpoint,
                new AzureKeyCredential(apiConfig.ApiKey!));
        }
    }
}
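Putting the pieces together, an illustrative sketch of the intended call flow (placeholder values; this is not the actual WASM UI wiring):

var apiConfig = new ApiConfig
{
    DeploymentName = "model-name-placeholder", // model name for OpenAI, deployment name for Azure OpenAI
    ApiKey = "api-key-placeholder",
};
IInferenceBackend backend = new OpenAIInferenceBackend(apiConfig);

var inference = new CodeCompletionInference();
var suggestion = await inference.GetInsertionSuggestionAsync(
    backend, new CodeCompletionConfig(), textBefore: "10 PRINT", textAfter: "");
// suggestion holds the predicted C64 Basic text to insert at the cursor,
// or an empty string when the model replies NO_PREDICTION.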