import argparse
import os
from dataclasses import dataclass
from enum import Enum
from typing import Optional, Tuple

import argcomplete


class AutoDetectedOption(Enum):
    ON = 'on'
    OFF = 'off'
    AUTO = 'auto'

    def __str__(self) -> str:
        return self.value


######################
## PUBLIC INTERFACE ##
######################

@dataclass
class RawArguments:
    args: argparse.Namespace
    debug: bool
    openai_key: Optional[str] = None


def parse_raw_args_or_complete() -> RawArguments:
    parser, debug = _construct_parser()
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    openai_key = os.getenv("OPENAI_KEY", os.getenv("OPENAI_API_KEY"))
    return RawArguments(
        args=args,
        debug=debug,
        openai_key=openai_key,
    )


#####################
##  PRIVATE LOGIC  ##
#####################

_GPT_CLI_ENV_PREFIX = "GPT_CLI_"


def _construct_parser() -> Tuple[argparse.ArgumentParser, bool]:
    # Every option's default can be overridden through a GPT_CLI_* environment
    # variable; argparse applies the `type` converter to string defaults, so
    # values read from the environment are coerced like normal CLI input.
    debug = os.getenv(f'{_GPT_CLI_ENV_PREFIX}DEBUG') is not None

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--model",
        default=os.getenv(f'{_GPT_CLI_ENV_PREFIX}MODEL', "gpt-3.5-turbo"),
        help="ID of the model to use",
    )
    parser.add_argument(
        "-t",
        "--temperature",
        type=float,
        default=os.getenv(f'{_GPT_CLI_ENV_PREFIX}TEMPERATURE', 0.5),
        help=(
            "What sampling temperature to use, between 0 and 2. Higher values "
            "like 0.8 will make the output more random, while lower values "
            "like 0.2 will make it more focused and deterministic."
        ),
    )
    parser.add_argument(
        "-f",
        "--frequency-penalty",
        type=float,
        default=os.getenv(f'{_GPT_CLI_ENV_PREFIX}FREQUENCY_PENALTY', 0),
        help=(
            "Number between -2.0 and 2.0. Positive values penalize new tokens based "
            "on their existing frequency in the text so far, decreasing the model's "
            "likelihood to repeat the same line verbatim."
        ),
    )
    parser.add_argument(
        "-p",
        "--presence-penalty",
        type=float,
        default=os.getenv(f'{_GPT_CLI_ENV_PREFIX}PRESENCE_PENALTY', 0),
        help=(
            "Number between -2.0 and 2.0. Positive values penalize new tokens based "
            "on whether they appear in the text so far, increasing the model's "
            "likelihood to talk about new topics."
        ),
    )
    parser.add_argument(
        "-k",
        "--max-tokens",
        type=int,
        default=os.getenv(f'{_GPT_CLI_ENV_PREFIX}MAX_TOKENS', 2048),
        help=(
            "The maximum number of tokens to generate in the chat completion. "
            "Defaults to 2048."
        ),
    )
    parser.add_argument(
        "-s",
        "--top-p",
        type=float,
        default=os.getenv(f'{_GPT_CLI_ENV_PREFIX}TOP_P', 1),
        help=(
            "An alternative to sampling with temperature, called nucleus sampling, "
            "where the model considers the results of the tokens with top_p "
            "probability mass. So 0.1 means only the tokens comprising the top 10%% "
            "probability mass are considered."
        ),
    )
    parser.add_argument(
        "-n",
        "--n-completions",
        type=int,
        default=os.getenv(f'{_GPT_CLI_ENV_PREFIX}N_COMPLETIONS', 1),
        help="How many chat completion choices to generate for each input message.",
    )
    parser.add_argument(
        "--system-message",
        type=str,
        default=os.getenv(f'{_GPT_CLI_ENV_PREFIX}SYSTEM_MESSAGE'),
        help="Specify an alternative system message.",
    )
    parser.add_argument(
        "--adornments",
        type=AutoDetectedOption,
        choices=list(AutoDetectedOption),
        default=AutoDetectedOption.AUTO,
        help=(
            "Show adornments to indicate the model and response."
            " Can be set to 'on', 'off', or 'auto'."
        ),
    )
    parser.add_argument(
        "--color",
        type=AutoDetectedOption,
        choices=list(AutoDetectedOption),
        default=AutoDetectedOption.AUTO,
        help="Set color to 'on', 'off', or 'auto'.",
    )
    parser.add_argument(
        "--version",
        action="store_true",
        help="Print version and exit",
    )
    parser.add_argument(
        "-l",
        "--list-models",
        action="store_true",
        help="List models and exit",
    )
    parser.add_argument(
        "-i",
        "--interactive",
        action="store_true",
        help="Start an interactive session",
    )
    initial_prompt = parser.add_mutually_exclusive_group()
    initial_prompt.add_argument(
        '--prompt-from-fd',
        type=int,
        help="Obtain the initial prompt from the specified file descriptor",
    )
    initial_prompt.add_argument(
        '--prompt-from-file',
        type=str,
        help="Obtain the initial prompt from the specified file",
    )
    parser.add_argument(
        "message",
        type=str,
        nargs='?',
        help=(
            "The contents of the message. When in an interactive session, this is "
            "the initial prompt provided."
        ),
    )

    # Debug-only flags are registered only when GPT_CLI_DEBUG is set, so they
    # never appear in the normal --help output.
    if debug:
        group = parser.add_mutually_exclusive_group()
        group.add_argument(
            '--save-response-to-file',
            type=str,
            help="UNSTABLE: save the response to a file so it can be replayed later for debugging purposes",
        )
        group.add_argument(
            '--load-response-from-file',
            type=str,
            help="UNSTABLE: load a response from a file, replaying it for debugging purposes",
        )

    return parser, debug
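

# ---------------------------------------------------------------------------
# Minimal usage sketch, assuming this module is the argument layer of the CLI.
# Everything below is illustrative: the printed version string and the way the
# parsed values are consumed are assumptions, not behavior defined here.
# Defaults can also be overridden from the environment, e.g.:
#     GPT_CLI_MODEL=gpt-4 GPT_CLI_TEMPERATURE=0.2 gpt-cli "hello"
if __name__ == "__main__":
    raw = parse_raw_args_or_complete()
    if raw.args.version:
        print("gpt-cli (version placeholder)")  # the real tool prints its own version
        raise SystemExit(0)
    if raw.openai_key is None:
        raise SystemExit("error: set OPENAI_KEY or OPENAI_API_KEY")
    # Parsed attributes map one-to-one onto the flags defined above:
    print(f"model={raw.args.model} temperature={raw.args.temperature} "
          f"max_tokens={raw.args.max_tokens} interactive={raw.args.interactive}")
    if raw.debug:
        print("debug mode enabled via GPT_CLI_DEBUG")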