StyledLines from testedlines.com: C# docs 1.0.1
A GPT2-based text-stylization LLM model, wrapped with Llama.cpp in C# to ensure compatibility across various platforms, including iOS and WebGL. The model is designed to transform generic texts into stylized, game- or user-tailored dialogue.
Loading...
Searching...
No Matches
LlamaLibrary.gpt_params Class Reference
+ Inheritance diagram for LlamaLibrary.gpt_params:
+ Collaboration diagram for LlamaLibrary.gpt_params:

Public Member Functions

virtual void Dispose ()
 
 gpt_params ()
 

Protected Attributes

bool swigCMemOwn
 

Package Functions

 gpt_params (global::System.IntPtr cPtr, bool cMemoryOwn)
 

Static Package Functions

static global::System.Runtime.InteropServices.HandleRef getCPtr (gpt_params obj)
 

Properties

Strings antiprompt [get, set]
 
Strings api_keys [get, set]
 
SWIGTYPE_llama_attention_type attention_type [get, set]
 
string cache_type_k [get, set]
 
string cache_type_v [get, set]
 
SWIGTYPE_p_ggml_backend_sched_eval_callback cb_eval [get, set]
 
SWIGTYPE_p_void cb_eval_user_data [get, set]
 
string chat_template [get, set]
 
bool check_tensors [get, set]
 
string chunk_separator [get, set]
 
int chunk_size [get, set]
 
bool compute_ppl [get, set]
 
bool cont_batching [get, set]
 
Strings context_files [get, set]
 
int control_vector_layer_end [get, set]
 
int control_vector_layer_start [get, set]
 
LlamaControlVectorLoadInfos control_vectors [get, set]
 
bool conversation [get, set]
 
dimre_method cvector_dimre_method [get, set]
 
string cvector_negative_file [get, set]
 
string cvector_outfile [get, set]
 
string cvector_positive_file [get, set]
 
float defrag_thold [get, set]
 
bool display_prompt [get, set]
 
bool dump_kv_cache [get, set]
 
int embd_normalize [get, set]
 
string embd_out [get, set]
 
string embd_sep [get, set]
 
bool embedding [get, set]
 
bool enable_chat_template [get, set]
 
bool endpoint_metrics [get, set]
 
bool endpoint_slots [get, set]
 
bool escape [get, set]
 
bool flash_attn [get, set]
 
int grp_attn_n [get, set]
 
int grp_attn_w [get, set]
 
bool hellaswag [get, set]
 
uint hellaswag_tasks [get, set]
 
string hf_file [get, set]
 
string hf_repo [get, set]
 
string hf_token [get, set]
 
string hostname [get, set]
 
int i_chunk [get, set]
 
int i_pos [get, set]
 
bool ignore_eos [get, set]
 
Strings image [get, set]
 
Strings in_files [get, set]
 
bool infill [get, set]
 
string input_prefix [get, set]
 
bool input_prefix_bos [get, set]
 
string input_suffix [get, set]
 
bool interactive [get, set]
 
bool interactive_first [get, set]
 
bool is_pp_shared [get, set]
 
bool kl_divergence [get, set]
 
SWIGTYPE_p_std__vectorT_llama_model_kv_override_t kv_overrides [get, set]
 
bool log_json [get, set]
 
string logdir [get, set]
 
bool logits_all [get, set]
 
string logits_file [get, set]
 
string lookup_cache_dynamic [get, set]
 
string lookup_cache_static [get, set]
 
SWIGTYPE_p_std__vectorT_std__tupleT_std__string_float_t_t lora_adapter [get, set]
 
string lora_outfile [get, set]
 
int main_gpu [get, set]
 
string mmproj [get, set]
 
string model [get, set]
 
string model_alias [get, set]
 
string model_draft [get, set]
 
string model_url [get, set]
 
bool multiline_input [get, set]
 
bool multiple_choice [get, set]
 
uint multiple_choice_tasks [get, set]
 
int n_batch [get, set]
 
int n_chunks [get, set]
 
int n_ctx [get, set]
 
int n_draft [get, set]
 
int n_gpu_layers [get, set]
 
int n_gpu_layers_draft [get, set]
 
int n_junk [get, set]
 
int n_keep [get, set]
 
int n_out_freq [get, set]
 
int n_parallel [get, set]
 
int n_pca_batch [get, set]
 
int n_pca_iterations [get, set]
 
Int32s n_pl [get, set]
 
Int32s n_pp [get, set]
 
int n_predict [get, set]
 
int n_print [get, set]
 
int n_save_freq [get, set]
 
int n_sequences [get, set]
 
Int32s n_tg [get, set]
 
int n_threads [get, set]
 
int n_threads_batch [get, set]
 
int n_threads_batch_draft [get, set]
 
int n_threads_draft [get, set]
 
int n_threads_http [get, set]
 
int n_ubatch [get, set]
 
bool no_kv_offload [get, set]
 
SWIGTYPE_p_ggml_numa_strategy numa [get, set]
 
string out_file [get, set]
 
float p_split [get, set]
 
string path_prompt_cache [get, set]
 
SWIGTYPE_llama_pooling_type pooling_type [get, set]
 
int port [get, set]
 
int ppl_output_type [get, set]
 
int ppl_stride [get, set]
 
bool process_output [get, set]
 
string prompt [get, set]
 
bool prompt_cache_all [get, set]
 
bool prompt_cache_ro [get, set]
 
string prompt_file [get, set]
 
string public_path [get, set]
 
float rope_freq_base [get, set]
 
float rope_freq_scale [get, set]
 
SWIGTYPE_llama_rope_scaling_type rope_scaling_type [get, set]
 
string rpc_servers [get, set]
 
uint seed [get, set]
 
bool simple_io [get, set]
 
float slot_prompt_similarity [get, set]
 
string slot_save_path [get, set]
 
llama_sampling_params sparams [get, set]
 
bool special [get, set]
 
SWIGTYPE_llama_split_mode split_mode [get, set]
 
bool spm_infill [get, set]
 
string ssl_file_cert [get, set]
 
string ssl_file_key [get, set]
 
string system_prompt [get, set]
 
SWIGTYPE_p_float tensor_split [get, set]
 
int timeout_read [get, set]
 
int timeout_write [get, set]
 
bool usage [get, set]
 
bool use_color [get, set]
 
bool use_mlock [get, set]
 
bool use_mmap [get, set]
 
bool verbose_prompt [get, set]
 
int verbosity [get, set]
 
bool warmup [get, set]
 
bool winogrande [get, set]
 
uint winogrande_tasks [get, set]
 
float yarn_attn_factor [get, set]
 
float yarn_beta_fast [get, set]
 
float yarn_beta_slow [get, set]
 
float yarn_ext_factor [get, set]
 
int yarn_orig_ctx [get, set]
 

Private Member Functions

 ~gpt_params ()
 

Private Attributes

global::System.Runtime.InteropServices.HandleRef swigCPtr
 

Constructor & Destructor Documentation

◆ gpt_params() [1/2]

LlamaLibrary.gpt_params.gpt_params ( global::System.IntPtr cPtr,
bool cMemoryOwn )
package

◆ ~gpt_params()

LlamaLibrary.gpt_params.~gpt_params ( )
private

References LlamaLibrary.gpt_params.Dispose().

+ Here is the call graph for this function:

◆ gpt_params() [2/2]

LlamaLibrary.gpt_params.gpt_params ( )

Member Function Documentation

◆ Dispose()

virtual void LlamaLibrary.gpt_params.Dispose ( )
virtual

References LlamaLibrary.libllama_libPINVOKE.delete_gpt_params(), LlamaLibrary.gpt_params.swigCMemOwn, and LlamaLibrary.gpt_params.swigCPtr.

Referenced by Assets.StyledLines.Runtime.LlamaInfrenceUnity.RunModelAsync.OnDestroy(), and LlamaLibrary.gpt_params.~gpt_params().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getCPtr()

static global::System.Runtime.InteropServices.HandleRef LlamaLibrary.gpt_params.getCPtr ( gpt_params obj)
staticpackage

References LlamaLibrary.gpt_params.swigCPtr.

Referenced by LlamaLibrary.AsyncLlamaInfrence.SetupAsync().

+ Here is the caller graph for this function:

Member Data Documentation

◆ swigCMemOwn

bool LlamaLibrary.gpt_params.swigCMemOwn
protected

◆ swigCPtr

global::System.Runtime.InteropServices.HandleRef LlamaLibrary.gpt_params.swigCPtr
private

Property Documentation

◆ antiprompt

Strings LlamaLibrary.gpt_params.antiprompt
getset

◆ api_keys

Strings LlamaLibrary.gpt_params.api_keys
getset

◆ attention_type

SWIGTYPE_llama_attention_type LlamaLibrary.gpt_params.attention_type
getset

◆ cache_type_k

string LlamaLibrary.gpt_params.cache_type_k
getset

◆ cache_type_v

string LlamaLibrary.gpt_params.cache_type_v
getset

◆ cb_eval

SWIGTYPE_p_ggml_backend_sched_eval_callback LlamaLibrary.gpt_params.cb_eval
getset

◆ cb_eval_user_data

SWIGTYPE_p_void LlamaLibrary.gpt_params.cb_eval_user_data
getset

◆ chat_template

string LlamaLibrary.gpt_params.chat_template
getset

◆ check_tensors

bool LlamaLibrary.gpt_params.check_tensors
getset

◆ chunk_separator

string LlamaLibrary.gpt_params.chunk_separator
getset

◆ chunk_size

int LlamaLibrary.gpt_params.chunk_size
getset

◆ compute_ppl

bool LlamaLibrary.gpt_params.compute_ppl
getset

◆ cont_batching

bool LlamaLibrary.gpt_params.cont_batching
getset

◆ context_files

Strings LlamaLibrary.gpt_params.context_files
getset

◆ control_vector_layer_end

int LlamaLibrary.gpt_params.control_vector_layer_end
getset

◆ control_vector_layer_start

int LlamaLibrary.gpt_params.control_vector_layer_start
getset

◆ control_vectors

LlamaControlVectorLoadInfos LlamaLibrary.gpt_params.control_vectors
getset

◆ conversation

bool LlamaLibrary.gpt_params.conversation
getset

◆ cvector_dimre_method

dimre_method LlamaLibrary.gpt_params.cvector_dimre_method
getset

◆ cvector_negative_file

string LlamaLibrary.gpt_params.cvector_negative_file
getset

◆ cvector_outfile

string LlamaLibrary.gpt_params.cvector_outfile
getset

◆ cvector_positive_file

string LlamaLibrary.gpt_params.cvector_positive_file
getset

◆ defrag_thold

float LlamaLibrary.gpt_params.defrag_thold
getset

◆ display_prompt

bool LlamaLibrary.gpt_params.display_prompt
getset

◆ dump_kv_cache

bool LlamaLibrary.gpt_params.dump_kv_cache
getset

◆ embd_normalize

int LlamaLibrary.gpt_params.embd_normalize
getset

◆ embd_out

string LlamaLibrary.gpt_params.embd_out
getset

◆ embd_sep

string LlamaLibrary.gpt_params.embd_sep
getset

◆ embedding

bool LlamaLibrary.gpt_params.embedding
getset

◆ enable_chat_template

bool LlamaLibrary.gpt_params.enable_chat_template
getset

◆ endpoint_metrics

bool LlamaLibrary.gpt_params.endpoint_metrics
getset

◆ endpoint_slots

bool LlamaLibrary.gpt_params.endpoint_slots
getset

◆ escape

bool LlamaLibrary.gpt_params.escape
getset

◆ flash_attn

bool LlamaLibrary.gpt_params.flash_attn
getset

◆ grp_attn_n

int LlamaLibrary.gpt_params.grp_attn_n
getset

◆ grp_attn_w

int LlamaLibrary.gpt_params.grp_attn_w
getset

◆ hellaswag

bool LlamaLibrary.gpt_params.hellaswag
getset

◆ hellaswag_tasks

uint LlamaLibrary.gpt_params.hellaswag_tasks
getset

◆ hf_file

string LlamaLibrary.gpt_params.hf_file
getset

◆ hf_repo

string LlamaLibrary.gpt_params.hf_repo
getset

◆ hf_token

string LlamaLibrary.gpt_params.hf_token
getset

◆ hostname

string LlamaLibrary.gpt_params.hostname
getset

◆ i_chunk

int LlamaLibrary.gpt_params.i_chunk
getset

◆ i_pos

int LlamaLibrary.gpt_params.i_pos
getset

◆ ignore_eos

bool LlamaLibrary.gpt_params.ignore_eos
getset

◆ image

Strings LlamaLibrary.gpt_params.image
getset

◆ in_files

Strings LlamaLibrary.gpt_params.in_files
getset

◆ infill

bool LlamaLibrary.gpt_params.infill
getset

◆ input_prefix

string LlamaLibrary.gpt_params.input_prefix
getset

◆ input_prefix_bos

bool LlamaLibrary.gpt_params.input_prefix_bos
getset

◆ input_suffix

string LlamaLibrary.gpt_params.input_suffix
getset

◆ interactive

bool LlamaLibrary.gpt_params.interactive
getset

◆ interactive_first

bool LlamaLibrary.gpt_params.interactive_first
getset

◆ is_pp_shared

bool LlamaLibrary.gpt_params.is_pp_shared
getset

◆ kl_divergence

bool LlamaLibrary.gpt_params.kl_divergence
getset

◆ kv_overrides

SWIGTYPE_p_std__vectorT_llama_model_kv_override_t LlamaLibrary.gpt_params.kv_overrides
getset

◆ log_json

bool LlamaLibrary.gpt_params.log_json
getset

◆ logdir

string LlamaLibrary.gpt_params.logdir
getset

◆ logits_all

bool LlamaLibrary.gpt_params.logits_all
getset

◆ logits_file

string LlamaLibrary.gpt_params.logits_file
getset

◆ lookup_cache_dynamic

string LlamaLibrary.gpt_params.lookup_cache_dynamic
getset

◆ lookup_cache_static

string LlamaLibrary.gpt_params.lookup_cache_static
getset

◆ lora_adapter

SWIGTYPE_p_std__vectorT_std__tupleT_std__string_float_t_t LlamaLibrary.gpt_params.lora_adapter
getset

◆ lora_outfile

string LlamaLibrary.gpt_params.lora_outfile
getset

◆ main_gpu

int LlamaLibrary.gpt_params.main_gpu
getset

◆ mmproj

string LlamaLibrary.gpt_params.mmproj
getset

◆ model

string LlamaLibrary.gpt_params.model
getset

◆ model_alias

string LlamaLibrary.gpt_params.model_alias
getset

◆ model_draft

string LlamaLibrary.gpt_params.model_draft
getset

◆ model_url

string LlamaLibrary.gpt_params.model_url
getset

◆ multiline_input

bool LlamaLibrary.gpt_params.multiline_input
getset

◆ multiple_choice

bool LlamaLibrary.gpt_params.multiple_choice
getset

◆ multiple_choice_tasks

uint LlamaLibrary.gpt_params.multiple_choice_tasks
getset

◆ n_batch

int LlamaLibrary.gpt_params.n_batch
getset

◆ n_chunks

int LlamaLibrary.gpt_params.n_chunks
getset

◆ n_ctx

int LlamaLibrary.gpt_params.n_ctx
getset

◆ n_draft

int LlamaLibrary.gpt_params.n_draft
getset

◆ n_gpu_layers

int LlamaLibrary.gpt_params.n_gpu_layers
getset

◆ n_gpu_layers_draft

int LlamaLibrary.gpt_params.n_gpu_layers_draft
getset

◆ n_junk

int LlamaLibrary.gpt_params.n_junk
getset

◆ n_keep

int LlamaLibrary.gpt_params.n_keep
getset

◆ n_out_freq

int LlamaLibrary.gpt_params.n_out_freq
getset

◆ n_parallel

int LlamaLibrary.gpt_params.n_parallel
getset

◆ n_pca_batch

int LlamaLibrary.gpt_params.n_pca_batch
getset

◆ n_pca_iterations

int LlamaLibrary.gpt_params.n_pca_iterations
getset

◆ n_pl

Int32s LlamaLibrary.gpt_params.n_pl
getset

◆ n_pp

Int32s LlamaLibrary.gpt_params.n_pp
getset

◆ n_predict

int LlamaLibrary.gpt_params.n_predict
getset

◆ n_print

int LlamaLibrary.gpt_params.n_print
getset

◆ n_save_freq

int LlamaLibrary.gpt_params.n_save_freq
getset

◆ n_sequences

int LlamaLibrary.gpt_params.n_sequences
getset

◆ n_tg

Int32s LlamaLibrary.gpt_params.n_tg
getset

◆ n_threads

int LlamaLibrary.gpt_params.n_threads
getset

◆ n_threads_batch

int LlamaLibrary.gpt_params.n_threads_batch
getset

◆ n_threads_batch_draft

int LlamaLibrary.gpt_params.n_threads_batch_draft
getset

◆ n_threads_draft

int LlamaLibrary.gpt_params.n_threads_draft
getset

◆ n_threads_http

int LlamaLibrary.gpt_params.n_threads_http
getset

◆ n_ubatch

int LlamaLibrary.gpt_params.n_ubatch
getset

◆ no_kv_offload

bool LlamaLibrary.gpt_params.no_kv_offload
getset

◆ numa

SWIGTYPE_p_ggml_numa_strategy LlamaLibrary.gpt_params.numa
getset

◆ out_file

string LlamaLibrary.gpt_params.out_file
getset

◆ p_split

float LlamaLibrary.gpt_params.p_split
getset

◆ path_prompt_cache

string LlamaLibrary.gpt_params.path_prompt_cache
getset

◆ pooling_type

SWIGTYPE_llama_pooling_type LlamaLibrary.gpt_params.pooling_type
getset

◆ port

int LlamaLibrary.gpt_params.port
getset

◆ ppl_output_type

int LlamaLibrary.gpt_params.ppl_output_type
getset

◆ ppl_stride

int LlamaLibrary.gpt_params.ppl_stride
getset

◆ process_output

bool LlamaLibrary.gpt_params.process_output
getset

◆ prompt

string LlamaLibrary.gpt_params.prompt
getset

◆ prompt_cache_all

bool LlamaLibrary.gpt_params.prompt_cache_all
getset

◆ prompt_cache_ro

bool LlamaLibrary.gpt_params.prompt_cache_ro
getset

◆ prompt_file

string LlamaLibrary.gpt_params.prompt_file
getset

◆ public_path

string LlamaLibrary.gpt_params.public_path
getset

◆ rope_freq_base

float LlamaLibrary.gpt_params.rope_freq_base
getset

◆ rope_freq_scale

float LlamaLibrary.gpt_params.rope_freq_scale
getset

◆ rope_scaling_type

SWIGTYPE_llama_rope_scaling_type LlamaLibrary.gpt_params.rope_scaling_type
getset

◆ rpc_servers

string LlamaLibrary.gpt_params.rpc_servers
getset

◆ seed

uint LlamaLibrary.gpt_params.seed
getset

◆ simple_io

bool LlamaLibrary.gpt_params.simple_io
getset

◆ slot_prompt_similarity

float LlamaLibrary.gpt_params.slot_prompt_similarity
getset

◆ slot_save_path

string LlamaLibrary.gpt_params.slot_save_path
getset

◆ sparams

llama_sampling_params LlamaLibrary.gpt_params.sparams
getset

◆ special

bool LlamaLibrary.gpt_params.special
getset

◆ split_mode

SWIGTYPE_llama_split_mode LlamaLibrary.gpt_params.split_mode
getset

◆ spm_infill

bool LlamaLibrary.gpt_params.spm_infill
getset

◆ ssl_file_cert

string LlamaLibrary.gpt_params.ssl_file_cert
getset

◆ ssl_file_key

string LlamaLibrary.gpt_params.ssl_file_key
getset

◆ system_prompt

string LlamaLibrary.gpt_params.system_prompt
getset

◆ tensor_split

SWIGTYPE_p_float LlamaLibrary.gpt_params.tensor_split
getset

◆ timeout_read

int LlamaLibrary.gpt_params.timeout_read
getset

◆ timeout_write

int LlamaLibrary.gpt_params.timeout_write
getset

◆ usage

bool LlamaLibrary.gpt_params.usage
getset

◆ use_color

bool LlamaLibrary.gpt_params.use_color
getset

◆ use_mlock

bool LlamaLibrary.gpt_params.use_mlock
getset

◆ use_mmap

bool LlamaLibrary.gpt_params.use_mmap
getset

◆ verbose_prompt

bool LlamaLibrary.gpt_params.verbose_prompt
getset

◆ verbosity

int LlamaLibrary.gpt_params.verbosity
getset

◆ warmup

bool LlamaLibrary.gpt_params.warmup
getset

◆ winogrande

bool LlamaLibrary.gpt_params.winogrande
getset

◆ winogrande_tasks

uint LlamaLibrary.gpt_params.winogrande_tasks
getset

◆ yarn_attn_factor

float LlamaLibrary.gpt_params.yarn_attn_factor
getset

◆ yarn_beta_fast

float LlamaLibrary.gpt_params.yarn_beta_fast
getset

◆ yarn_beta_slow

float LlamaLibrary.gpt_params.yarn_beta_slow
getset

◆ yarn_ext_factor

float LlamaLibrary.gpt_params.yarn_ext_factor
getset

◆ yarn_orig_ctx

int LlamaLibrary.gpt_params.yarn_orig_ctx
getset

The documentation for this class was generated from the following file: