Dataset Viewer
Auto-converted to Parquet
text
stringlengths
435
1.65k
prefix
stringlengths
51
700
middle
stringlengths
10
200
suffix
stringlengths
50
700
type
stringclasses
2 values
<fim_prefix> convert_weights_ak_to_gg(layer.wv , &w->wv[i*row_length*row_length/n_multiqueries]); convert_weights_ak_to_gg(layer.w1 , &w->w1[i*row_length*n_ff]); convert_weights_ak_to_gg(layer.w2 , &w->w2[i*n_ff*row_length]); convert_weights_ak_to_gg(layer.w3 , &w->w3[i*row_length*n_ff]); } struct gguf_context * ctx = gguf_init_empty(); std::vector<const char*> tokens; std::vector<float> scores; std::vector<llama_token_type> token_types; for (const my_llama_vocab::token_data & token_data : vocab->id_to_token) {<fim_suffix> scores.push_back(token_data.score); token_types.push_back(token_data.type); } gguf_set_arr_str(ctx, KV_TOKENIZER_LIST, tokens.data(), tokens.size()); gguf_set_arr_data(ctx, KV_TOKENIZER_SCORES, GGUF_TYPE_FLOAT32, scores.data(), scores.size()); gguf_set_arr_data(ctx, KV_TOKENIZER_TOKEN_TYPE, GGUF_TYPE_INT32, token_types.data(), token_types.size()); gguf_set_val_str(ctx, KV_TOKENIZER_MODEL, TOKENIZER_NAME); gguf_set_val_str(ctx, KV_GENERAL_ARCHITECTURE, "llama"); gguf_set_val_str(ctx, KV_GENERAL_NAME, "llama"); // special tokens gguf_set_val_u32(ctx, KV_TOKENIZER_UNK_ID, UNKNOWN_TOKEN_ID);<fim_middle> tokens.push_back(token_data.text.c_str());<fim_end>
convert_weights_ak_to_gg(layer.wv , &w->wv[i*row_length*row_length/n_multiqueries]); convert_weights_ak_to_gg(layer.w1 , &w->w1[i*row_length*n_ff]); convert_weights_ak_to_gg(layer.w2 , &w->w2[i*n_ff*row_length]); convert_weights_ak_to_gg(layer.w3 , &w->w3[i*row_length*n_ff]); } struct gguf_context * ctx = gguf_init_empty(); std::vector<const char*> tokens; std::vector<float> scores; std::vector<llama_token_type> token_types; for (const my_llama_vocab::token_data & token_data : vocab->id_to_token) {
tokens.push_back(token_data.text.c_str());
scores.push_back(token_data.score); token_types.push_back(token_data.type); } gguf_set_arr_str(ctx, KV_TOKENIZER_LIST, tokens.data(), tokens.size()); gguf_set_arr_data(ctx, KV_TOKENIZER_SCORES, GGUF_TYPE_FLOAT32, scores.data(), scores.size()); gguf_set_arr_data(ctx, KV_TOKENIZER_TOKEN_TYPE, GGUF_TYPE_INT32, token_types.data(), token_types.size()); gguf_set_val_str(ctx, KV_TOKENIZER_MODEL, TOKENIZER_NAME); gguf_set_val_str(ctx, KV_GENERAL_ARCHITECTURE, "llama"); gguf_set_val_str(ctx, KV_GENERAL_NAME, "llama"); // special tokens gguf_set_val_u32(ctx, KV_TOKENIZER_UNK_ID, UNKNOWN_TOKEN_ID);
random
<fim_prefix>:vector<llama_seq_id> seq_ids(n_parallel, 0); for (int32_t i = 0; i < n_parallel; ++i) { seq_ids[i] = i; } // evaluate the initial prompt for (size_t i = 0; i < tokens_list.size(); ++i) { common_batch_add(batch, tokens_list[i], i, seq_ids, false); } GGML_ASSERT(batch.n_tokens == (int) tokens_list.size()); if (llama_model_has_encoder(model)) { if (llama_encode(ctx, batch)) { LOG_ERR("%s : failed to eval\n", __func__); return 1; } llama_token decoder_start_token_id = llama_model_decoder_start_token(model); if (decoder_start_token_id == LLAMA_TOKEN_NULL) { decoder_start_token_id = <fim_suffix>; } common_batch_clear(batch); common_batch_add(batch, decoder_start_token_id, 0, seq_ids, false); } // llama_decode will output logits only for the last token of the prompt batch.logits[batch.n_tokens - 1] = true; if (llama_decode(ctx, batch) != 0) { LOG_ERR("%s: llama_decode() failed\n", __func__); return 1; } //// assign the system KV cache to all parallel sequences //// this way, the parallel sequences will "reuse" the prompt tokens without having to copy them //for (int32_t i = 1; i < n_parallel; ++i) { // llama_kv_cache_seq_cp(ctx, 0, i, -1, -1); //} if (n_parallel > 1) { LOG("\n\n%s: ge<fim_middle>llama_vocab_bos(vocab)<fim_end>
:vector<llama_seq_id> seq_ids(n_parallel, 0); for (int32_t i = 0; i < n_parallel; ++i) { seq_ids[i] = i; } // evaluate the initial prompt for (size_t i = 0; i < tokens_list.size(); ++i) { common_batch_add(batch, tokens_list[i], i, seq_ids, false); } GGML_ASSERT(batch.n_tokens == (int) tokens_list.size()); if (llama_model_has_encoder(model)) { if (llama_encode(ctx, batch)) { LOG_ERR("%s : failed to eval\n", __func__); return 1; } llama_token decoder_start_token_id = llama_model_decoder_start_token(model); if (decoder_start_token_id == LLAMA_TOKEN_NULL) { decoder_start_token_id =
llama_vocab_bos(vocab)
; } common_batch_clear(batch); common_batch_add(batch, decoder_start_token_id, 0, seq_ids, false); } // llama_decode will output logits only for the last token of the prompt batch.logits[batch.n_tokens - 1] = true; if (llama_decode(ctx, batch) != 0) { LOG_ERR("%s: llama_decode() failed\n", __func__); return 1; } //// assign the system KV cache to all parallel sequences //// this way, the parallel sequences will "reuse" the prompt tokens without having to copy them //for (int32_t i = 1; i < n_parallel; ++i) { // llama_kv_cache_seq_cp(ctx, 0, i, -1, -1); //} if (n_parallel > 1) { LOG("\n\n%s: ge
ast_based
<fim_prefix> al_use_projection_transform(&last_projection_transform); } bool ImGui_ImplAllegro5_CreateDeviceObjects() { ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData(); // Create an invisible mouse cursor // Because al_hide_mouse_cursor() seems to mess up with the actual inputs.. ALLEGRO_BITMAP* mouse_cursor = al_create_bitmap(8, 8); bd->MouseCursorInvisible = al_create_mouse_cursor(mouse_cursor, 0, 0); al_destroy_bitmap(mouse_cursor); return true; } void ImGui_ImplAllegro5_UpdateTexture(ImTextureData* tex) { if (tex->Status == ImTextureStatus_WantCreate) { // Create and upload new texture to graphics system<fim_suffix> IM_ASSERT(tex->TexID == ImTextureID_Invalid && tex->BackendUserData == nullptr); IM_ASSERT(tex->Format == ImTextureFormat_RGBA32); // Create texture // (Bilinear sampling is required by default. Set 'io.Fonts->Flags |= ImFontAtlasFlags_NoBakedLines' or 'style.AntiAliasedLinesUseTex = false' to allow point/nearest sampling) const int new_bitmap_flags = al_get_new_bitmap_flags(); int new_bitmap_format = al_get_new_bitmap_format(); al_set_new_bitmap_flags(ALLEGRO_MEMORY_BITMAP | ALLEGRO_MIN_LINEAR | ALLEGRO_MAG_LINEAR); al_set_new_bitmap_format(ALLEGRO_PIXEL_FORMAT_ABGR_8888_LE);<fim_middle> //IMGUI_DEBUG_LOG("UpdateTexture #%03d: WantCreate %dx%d\n", tex->UniqueID, tex->Width, tex->Height);<fim_end>
al_use_projection_transform(&last_projection_transform); } bool ImGui_ImplAllegro5_CreateDeviceObjects() { ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData(); // Create an invisible mouse cursor // Because al_hide_mouse_cursor() seems to mess up with the actual inputs.. ALLEGRO_BITMAP* mouse_cursor = al_create_bitmap(8, 8); bd->MouseCursorInvisible = al_create_mouse_cursor(mouse_cursor, 0, 0); al_destroy_bitmap(mouse_cursor); return true; } void ImGui_ImplAllegro5_UpdateTexture(ImTextureData* tex) { if (tex->Status == ImTextureStatus_WantCreate) { // Create and upload new texture to graphics system
//IMGUI_DEBUG_LOG("UpdateTexture #%03d: WantCreate %dx%d\n", tex->UniqueID, tex->Width, tex->Height);
IM_ASSERT(tex->TexID == ImTextureID_Invalid && tex->BackendUserData == nullptr); IM_ASSERT(tex->Format == ImTextureFormat_RGBA32); // Create texture // (Bilinear sampling is required by default. Set 'io.Fonts->Flags |= ImFontAtlasFlags_NoBakedLines' or 'style.AntiAliasedLinesUseTex = false' to allow point/nearest sampling) const int new_bitmap_flags = al_get_new_bitmap_flags(); int new_bitmap_format = al_get_new_bitmap_format(); al_set_new_bitmap_flags(ALLEGRO_MEMORY_BITMAP | ALLEGRO_MIN_LINEAR | ALLEGRO_MAG_LINEAR); al_set_new_bitmap_format(ALLEGRO_PIXEL_FORMAT_ABGR_8888_LE);
random
<fim_prefix> RANGE_DATA(ST, src1_data, src1_step), src1_step, \ RANGE_DATA(DT, dst1_data, dst1_step), dst1_step, __VA_ARGS__) #define TEGRA_UNARYOPSCALE(type, op, src1, sz1, dst, sz, w, h, scales) \ ( \ CAROTENE_NS::isSupportedConfiguration() ? \ parallel_for_(cv::Range(0, h), \ TegraGenOp_##op##_Invoker<const type, type>(src1, sz1, dst, sz, w, h, scales), \ (w * h) / static_cast<double>(1<<16)), \ CV_HAL_ERROR_OK \ : CV_HAL_ERROR_NOT_IMPLEMENTED \ ) TegraUnaryOpScale_Invoker(recip, reciprocal, 1, scale, CAROTENE_NS::CONVERT_POLICY_SATURATE) <fim_suffix> #undef cv_hal_mul8u #define cv_hal_mul8u(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::u8, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales) #undef cv_hal_mul8s #define cv_hal_mul8s(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::s8, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales) #undef cv_hal_mul16u #define cv_hal_mul16u(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::u16, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales) #undef cv_hal_mul16s #define cv_hal_mul16s(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::s16, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales) <fim_middle>TegraUnaryOpScale_Invoker(recipf, reciprocal, 1, scale)<fim_end>
RANGE_DATA(ST, src1_data, src1_step), src1_step, \ RANGE_DATA(DT, dst1_data, dst1_step), dst1_step, __VA_ARGS__) #define TEGRA_UNARYOPSCALE(type, op, src1, sz1, dst, sz, w, h, scales) \ ( \ CAROTENE_NS::isSupportedConfiguration() ? \ parallel_for_(cv::Range(0, h), \ TegraGenOp_##op##_Invoker<const type, type>(src1, sz1, dst, sz, w, h, scales), \ (w * h) / static_cast<double>(1<<16)), \ CV_HAL_ERROR_OK \ : CV_HAL_ERROR_NOT_IMPLEMENTED \ ) TegraUnaryOpScale_Invoker(recip, reciprocal, 1, scale, CAROTENE_NS::CONVERT_POLICY_SATURATE)
TegraUnaryOpScale_Invoker(recipf, reciprocal, 1, scale)
#undef cv_hal_mul8u #define cv_hal_mul8u(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::u8, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales) #undef cv_hal_mul8s #define cv_hal_mul8s(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::s8, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales) #undef cv_hal_mul16u #define cv_hal_mul16u(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::u16, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales) #undef cv_hal_mul16s #define cv_hal_mul16s(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::s16, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales)
ast_based
<fim_prefix>EPLICATE; break; case CV_HAL_BORDER_REFLECT: ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT; break; case CV_HAL_BORDER_WRAP: ctx->border = CAROTENE_NS::BORDER_MODE_WRAP; break; case CV_HAL_BORDER_REFLECT_101: ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT101; break; default: delete ctx; return CV_HAL_ERROR_NOT_IMPLEMENTED; } if(!CAROTENE_NS::isSeparableFilter3x3Supported(CAROTENE_NS::Size2D(16, 16), ctx->border, 3, 3)) { delete ctx; return CV_HAL_ERROR_NOT_IMPLEMENTED; } switch(kernel_type) { case CV_8UC1: ctx->kernelx_data[0]=kernelx_data[0]; <fim_suffix>; ctx->kernelx_data[2]=kernelx_data[2]; ctx->kernely_data[0]=kernely_data[0]; ctx->kernely_data[1]=kernely_data[1]; ctx->kernely_data[2]=kernely_data[2]; break; case CV_8SC1: ctx->kernelx_data[0]=((char*)kernelx_data)[0]; ctx->kernelx_data[1]=((char*)kernelx_data)[1]; ctx->kernelx_data[2]=((char*)kernelx_data)[2]; ctx->kernely_data[0]=((char*)kernely_data)[0]; ctx->kernely_data[1]=((char*)kernely_data)[1]; ctx->kernely_data[2]=((char*)kernely_data)[2]; break; case CV_16UC1: ctx->kernelx_data[0]=((int16_t*)kernelx_data)[0]; ctx->kernelx_data[1]=((int16_t*)kernelx_data)[1]; <fim_middle>ctx->kernelx_data[1]=kernelx_data[1]<fim_end>
EPLICATE; break; case CV_HAL_BORDER_REFLECT: ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT; break; case CV_HAL_BORDER_WRAP: ctx->border = CAROTENE_NS::BORDER_MODE_WRAP; break; case CV_HAL_BORDER_REFLECT_101: ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT101; break; default: delete ctx; return CV_HAL_ERROR_NOT_IMPLEMENTED; } if(!CAROTENE_NS::isSeparableFilter3x3Supported(CAROTENE_NS::Size2D(16, 16), ctx->border, 3, 3)) { delete ctx; return CV_HAL_ERROR_NOT_IMPLEMENTED; } switch(kernel_type) { case CV_8UC1: ctx->kernelx_data[0]=kernelx_data[0];
ctx->kernelx_data[1]=kernelx_data[1]
; ctx->kernelx_data[2]=kernelx_data[2]; ctx->kernely_data[0]=kernely_data[0]; ctx->kernely_data[1]=kernely_data[1]; ctx->kernely_data[2]=kernely_data[2]; break; case CV_8SC1: ctx->kernelx_data[0]=((char*)kernelx_data)[0]; ctx->kernelx_data[1]=((char*)kernelx_data)[1]; ctx->kernelx_data[2]=((char*)kernelx_data)[2]; ctx->kernely_data[0]=((char*)kernely_data)[0]; ctx->kernely_data[1]=((char*)kernely_data)[1]; ctx->kernely_data[2]=((char*)kernely_data)[2]; break; case CV_16UC1: ctx->kernelx_data[0]=((int16_t*)kernelx_data)[0]; ctx->kernelx_data[1]=((int16_t*)kernelx_data)[1];
ast_based
<fim_prefix> -> RefType { return values_.Get(id); } // Returns a new block formed by applying `transform(elem_id)` to each element // in the specified block. template <typename TransformFnT> auto Transform(IdT id, TransformFnT transform) -> IdT { llvm::SmallVector<ElementType> block(llvm::map_range(Get(id), transform)); return Add(block); } // Adds a block or finds an existing canonical block with the given content, // and returns an ID to reference it. auto AddCanonical(ConstRefType content) -> IdT { if (content.empty()) { return IdT::Empty; } auto result = canonical_blocks_.Insert( content, [&] { return Add(content); }, KeyContext(this)); return <fim_suffix>; } // Promotes an existing block ID to a canonical block ID, or returns an // existing canonical block ID if the block was already added. The specified // block must not be modified after this point. auto MakeCanonical(IdT id) -> IdT { // Get the content first so that we don't have unnecessary translation of // the `id` into the content during insertion. auto result = canonical_blocks_.Insert( Get(id), [id] { return id; }, KeyContext(this)); return result.key(); } auto OutputYaml() const -> Yaml::OutputMapping { return Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) { for (auto [block_id, block] : values_.enumerate()) { map.Add<fim_middle>result.key()<fim_end>
-> RefType { return values_.Get(id); } // Returns a new block formed by applying `transform(elem_id)` to each element // in the specified block. template <typename TransformFnT> auto Transform(IdT id, TransformFnT transform) -> IdT { llvm::SmallVector<ElementType> block(llvm::map_range(Get(id), transform)); return Add(block); } // Adds a block or finds an existing canonical block with the given content, // and returns an ID to reference it. auto AddCanonical(ConstRefType content) -> IdT { if (content.empty()) { return IdT::Empty; } auto result = canonical_blocks_.Insert( content, [&] { return Add(content); }, KeyContext(this)); return
result.key()
; } // Promotes an existing block ID to a canonical block ID, or returns an // existing canonical block ID if the block was already added. The specified // block must not be modified after this point. auto MakeCanonical(IdT id) -> IdT { // Get the content first so that we don't have unnecessary translation of // the `id` into the content during insertion. auto result = canonical_blocks_.Insert( Get(id), [id] { return id; }, KeyContext(this)); return result.key(); } auto OutputYaml() const -> Yaml::OutputMapping { return Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) { for (auto [block_id, block] : values_.enumerate()) { map.Add
ast_based
<fim_prefix> size = size * nmemb; auto *buf = reinterpret_cast<std::string *>(userp); buf->append(reinterpret_cast<const char *>(contents), size); return size; } #endif // In the ideal scenario, Tesseract will start working on data as soon // as it can. For example, if you stream a filelist through stdin, we // should start the OCR process as soon as the first filename is // available. This is particularly useful when hooking Tesseract up to // slow hardware such as a book scanning machine. // // Unfortunately there are tradeoffs. You can't seek on stdin. That // makes automatic detection of datatype (TIFF? filelist? PNG?)<fim_suffix>// stdin. We'll still do our best if the user likes pipes. bool TessBaseAPI::ProcessPagesInternal(const char *filename, const char *retry_config, int timeout_millisec, TessResultRenderer *renderer) { bool stdInput = !strcmp(filename, "stdin") || !strcmp(filename, "-"); if (stdInput) { #ifdef WIN32 if (_setmode(_fileno(stdin), _O_BINARY) == -1) tprintf("ERROR: cin to binary: %s", strerror(errno)); #endif // WIN32 } if (stream_filelist) { return ProcessPagesFileList(stdin, nullptr, retry_config, timeout_millisec, renderer, tesseract_->tessedit_page_number); } <fim_middle>// impractical. So we support a command line flag to explicitly // identify the scenario that really matters: filelists on<fim_end>
size = size * nmemb; auto *buf = reinterpret_cast<std::string *>(userp); buf->append(reinterpret_cast<const char *>(contents), size); return size; } #endif // In the ideal scenario, Tesseract will start working on data as soon // as it can. For example, if you stream a filelist through stdin, we // should start the OCR process as soon as the first filename is // available. This is particularly useful when hooking Tesseract up to // slow hardware such as a book scanning machine. // // Unfortunately there are tradeoffs. You can't seek on stdin. That // makes automatic detection of datatype (TIFF? filelist? PNG?)
// impractical. So we support a command line flag to explicitly // identify the scenario that really matters: filelists on
// stdin. We'll still do our best if the user likes pipes. bool TessBaseAPI::ProcessPagesInternal(const char *filename, const char *retry_config, int timeout_millisec, TessResultRenderer *renderer) { bool stdInput = !strcmp(filename, "stdin") || !strcmp(filename, "-"); if (stdInput) { #ifdef WIN32 if (_setmode(_fileno(stdin), _O_BINARY) == -1) tprintf("ERROR: cin to binary: %s", strerror(errno)); #endif // WIN32 } if (stream_filelist) { return ProcessPagesFileList(stdin, nullptr, retry_config, timeout_millisec, renderer, tesseract_->tessedit_page_number); }
random
<fim_prefix>(curlcode != CURLE_OK) { return error("curl_easy_setopt"); } } curlcode = curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); if (curlcode != CURLE_OK) { return error("curl_easy_setopt"); } curlcode = curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buf); if (curlcode != CURLE_OK) { return error("curl_easy_setopt"); } curlcode = curl_easy_setopt(curl, CURLOPT_USERAGENT, "Tesseract OCR"); if (curlcode != CURLE_OK) { return error("curl_easy_setopt"); } curlcode = curl_easy_perform(curl); if (curlcode != CURLE_OK) { return error("curl_easy_perform"); } <fim_suffix>; data = reinterpret_cast<const l_uint8 *>(buf.data()); } #else fprintf(stderr, "Error, this tesseract has no URL support\n"); return false; #endif } else { // Check whether the input file can be read. if (FILE *file = fopen(filename, "rb")) { fclose(file); } else { fprintf(stderr, "Error, cannot read input file %s: %s\n", filename, strerror(errno)); return false; } } // Here is our autodetection int format; int r = (data != nullptr) ? findFileFormatBuffer(data, &format) : findFileFormat(filename, &format); // Maybe we have a filelist if (r != 0 || format == IFF_UNKNOWN) { std::string s; if (data != nullptr) { <fim_middle>curl_easy_cleanup(curl)<fim_end>
(curlcode != CURLE_OK) { return error("curl_easy_setopt"); } } curlcode = curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); if (curlcode != CURLE_OK) { return error("curl_easy_setopt"); } curlcode = curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buf); if (curlcode != CURLE_OK) { return error("curl_easy_setopt"); } curlcode = curl_easy_setopt(curl, CURLOPT_USERAGENT, "Tesseract OCR"); if (curlcode != CURLE_OK) { return error("curl_easy_setopt"); } curlcode = curl_easy_perform(curl); if (curlcode != CURLE_OK) { return error("curl_easy_perform"); }
curl_easy_cleanup(curl)
; data = reinterpret_cast<const l_uint8 *>(buf.data()); } #else fprintf(stderr, "Error, this tesseract has no URL support\n"); return false; #endif } else { // Check whether the input file can be read. if (FILE *file = fopen(filename, "rb")) { fclose(file); } else { fprintf(stderr, "Error, cannot read input file %s: %s\n", filename, strerror(errno)); return false; } } // Here is our autodetection int format; int r = (data != nullptr) ? findFileFormatBuffer(data, &format) : findFileFormat(filename, &format); // Maybe we have a filelist if (r != 0 || format == IFF_UNKNOWN) { std::string s; if (data != nullptr) {
ast_based
<fim_prefix>} void Engine::get_singletons(List<Singleton> *p_singletons) { for (const Singleton &E : singletons) { #ifdef TOOLS_ENABLED if (!is_editor_hint() && E.editor_only) { continue; } #endif p_singletons->push_back(E); } } String Engine::get_write_movie_path() const { return write_movie_path; } void Engine::set_write_movie_path(const String &p_path) { write_movie_path = p_path; } void Engine::set_shader_cache_path(const String &p_path) { shader_cache_path = p_path; } String Engine::get_shader_cache_path() const { return shader_cache_path; } Engine *Engine::get_singleton() { return singleton; } bool Engine::notify_frame_server_synced() { frame_server_synced = true;<fim_suffix> freeze_time_scale = p_frozen; } void Engine::set_embedded_in_editor(bool p_enabled) { embedded_in_editor = p_enabled; } bool Engine::is_embedded_in_editor() const { return embedded_in_editor; } Engine::Engine() { singleton = this; } Engine::~Engine() { if (singleton == this) { singleton = nullptr; } } Engine::Singleton::Singleton(const StringName &p_name, Object *p_ptr, const StringName &p_class_name) : name(p_name), ptr(p_ptr), class_name(p_class_name) { #ifdef DEBUG_ENABLED RefCounted *rc = Object::cast_to<RefCounted>(p_ptr); if (rc && !rc->is_referenced()) {<fim_middle> return server_syncs > SERVER_SYNC_FRAME_COUNT_WARNING; } void Engine::set_freeze_time_scale(bool p_frozen) {<fim_end>
} void Engine::get_singletons(List<Singleton> *p_singletons) { for (const Singleton &E : singletons) { #ifdef TOOLS_ENABLED if (!is_editor_hint() && E.editor_only) { continue; } #endif p_singletons->push_back(E); } } String Engine::get_write_movie_path() const { return write_movie_path; } void Engine::set_write_movie_path(const String &p_path) { write_movie_path = p_path; } void Engine::set_shader_cache_path(const String &p_path) { shader_cache_path = p_path; } String Engine::get_shader_cache_path() const { return shader_cache_path; } Engine *Engine::get_singleton() { return singleton; } bool Engine::notify_frame_server_synced() { frame_server_synced = true;
return server_syncs > SERVER_SYNC_FRAME_COUNT_WARNING; } void Engine::set_freeze_time_scale(bool p_frozen) {
freeze_time_scale = p_frozen; } void Engine::set_embedded_in_editor(bool p_enabled) { embedded_in_editor = p_enabled; } bool Engine::is_embedded_in_editor() const { return embedded_in_editor; } Engine::Engine() { singleton = this; } Engine::~Engine() { if (singleton == this) { singleton = nullptr; } } Engine::Singleton::Singleton(const StringName &p_name, Object *p_ptr, const StringName &p_class_name) : name(p_name), ptr(p_ptr), class_name(p_class_name) { #ifdef DEBUG_ENABLED RefCounted *rc = Object::cast_to<RefCounted>(p_ptr); if (rc && !rc->is_referenced()) {
random
<fim_prefix>ressed() && !mb->is_shift_pressed() && !mb->is_command_or_control_pressed(); if (mb->is_double_click() && !moving_selection && no_mod_key_pressed) { int x = pos.x - timeline->get_name_limit(); float ofs = x / timeline->get_zoom_scale() + timeline->get_value(); emit_signal(SNAME("timeline_changed"), ofs, false); } for (const KeyValue<int, Rect2> &E : subtracks) { if (E.value.has_point(mb->get_position())) { if (!locked_tracks.has(E.key) && !hidden_tracks.has(E.key)) { set_animation_and_track(animation, E.key, read_only); _clear_selection(); } return; } } for (const KeyValue<int, RBMap<int, Rect2>> &E : subtrack_icons) { int track = E.key; <fim_suffix> for (const KeyValue<int, Rect2> &I : track_icons) { if (I.value.has_point(mb->get_position())) { if (I.key == REMOVE_ICON) { if (!read_only) { EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton(); undo_redo->create_action("Remove Bezier Track", UndoRedo::MERGE_DISABLE, animation.ptr()); undo_redo->add_do_method(this, "_update_locked_tracks_after", track); undo_redo->add_do_method(this, "_update_hidden_tracks_after", track); undo_redo->add_do_method(animation.ptr(), "remove_track", track); undo_redo->add_undo_method(animation.ptr(), "add_track", Animation::TrackType::TYPE_BEZIER, track); undo_redo->add<fim_middle>RBMap<int, Rect2> track_icons = E.value;<fim_end>
ressed() && !mb->is_shift_pressed() && !mb->is_command_or_control_pressed(); if (mb->is_double_click() && !moving_selection && no_mod_key_pressed) { int x = pos.x - timeline->get_name_limit(); float ofs = x / timeline->get_zoom_scale() + timeline->get_value(); emit_signal(SNAME("timeline_changed"), ofs, false); } for (const KeyValue<int, Rect2> &E : subtracks) { if (E.value.has_point(mb->get_position())) { if (!locked_tracks.has(E.key) && !hidden_tracks.has(E.key)) { set_animation_and_track(animation, E.key, read_only); _clear_selection(); } return; } } for (const KeyValue<int, RBMap<int, Rect2>> &E : subtrack_icons) { int track = E.key;
RBMap<int, Rect2> track_icons = E.value;
for (const KeyValue<int, Rect2> &I : track_icons) { if (I.value.has_point(mb->get_position())) { if (I.key == REMOVE_ICON) { if (!read_only) { EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton(); undo_redo->create_action("Remove Bezier Track", UndoRedo::MERGE_DISABLE, animation.ptr()); undo_redo->add_do_method(this, "_update_locked_tracks_after", track); undo_redo->add_do_method(this, "_update_hidden_tracks_after", track); undo_redo->add_do_method(animation.ptr(), "remove_track", track); undo_redo->add_undo_method(animation.ptr(), "add_track", Animation::TrackType::TYPE_BEZIER, track); undo_redo->add
ast_based
<fim_prefix>( \ depth == CV_8U && CAROTENE_NS::isSupportedConfiguration() ? \ scn == 3 ? \ (swapBlue ? \ parallel_for_(cv::Range(0, height), \ TegraCvtColor_rgb2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \ (width * height) / static_cast<double>(1<<16)) : \ parallel_for_(cv::Range(0, height), \ TegraCvtColor_bgr2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \ (width * height) / static_cast<double>(1<<16)) ), \ CV_HAL_ERROR_OK : \<fim_suffix> (swapBlue ? \ parallel_for_(cv::Range(0, height), \ TegraCvtColor_rgbx2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \ (width * height) / static_cast<double>(1<<16)) : \ parallel_for_(cv::Range(0, height), \ TegraCvtColor_bgrx2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \ (width * height) / static_cast<double>(1<<16)) ), \ CV_HAL_ERROR_OK : \ CV_HAL_ERROR_NOT_IMPLEMENTED \ : CV_HAL_ERROR_NOT_IMPLEMENTED \ ) <fim_middle> scn == 4 ? \<fim_end>
( \ depth == CV_8U && CAROTENE_NS::isSupportedConfiguration() ? \ scn == 3 ? \ (swapBlue ? \ parallel_for_(cv::Range(0, height), \ TegraCvtColor_rgb2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \ (width * height) / static_cast<double>(1<<16)) : \ parallel_for_(cv::Range(0, height), \ TegraCvtColor_bgr2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \ (width * height) / static_cast<double>(1<<16)) ), \ CV_HAL_ERROR_OK : \
scn == 4 ? \
(swapBlue ? \ parallel_for_(cv::Range(0, height), \ TegraCvtColor_rgbx2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \ (width * height) / static_cast<double>(1<<16)) : \ parallel_for_(cv::Range(0, height), \ TegraCvtColor_bgrx2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \ (width * height) / static_cast<double>(1<<16)) ), \ CV_HAL_ERROR_OK : \ CV_HAL_ERROR_NOT_IMPLEMENTED \ : CV_HAL_ERROR_NOT_IMPLEMENTED \ )
random
<fim_prefix>get_relative()); mouse_event_info.pos = p_event_pos; hover_prev_pos = p_event_pos; } ev->set_button_mask(event_buttons_mask); ev->set_pressure(p_pressure); ev->set_tilt(p_tilt); Input::get_singleton()->parse_input_event(ev); } break; case AMOTION_EVENT_ACTION_SCROLL: { Ref<InputEventMouseButton> ev; ev.instantiate(); _set_key_modifier_state(ev, Key::NONE); if (p_source_mouse_relative) { ev->set_position(hover_prev_pos); ev->set_global_position(hover_prev_pos); } else { ev->set_position(p_event_pos); ev->set_global_position(p_event_pos); } ev->set_pressed(true); buttons_state = event_buttons_mask; if (p_delta.y > 0) { <fim_suffix>; } else if (p_delta.y < 0) { _wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_DOWN, -p_delta.y); } if (p_delta.x > 0) { _wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_RIGHT, p_delta.x); } else if (p_delta.x < 0) { _wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_LEFT, -p_delta.x); } } break; } } void AndroidInputHandler::_wheel_button_click(BitField<MouseButtonMask> event_buttons_mask, const Ref<InputEventMouseButton> &ev, MouseButton wheel_button, float factor) { Ref<InputEventMouseButton> evd = ev->duplicate(); _set_key_modifier_state(evd, Key::NONE); evd->set_button_index(wheel_button); evd->set_button_mask(e<fim_middle>_wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_UP, p_delta.y)<fim_end>
get_relative()); mouse_event_info.pos = p_event_pos; hover_prev_pos = p_event_pos; } ev->set_button_mask(event_buttons_mask); ev->set_pressure(p_pressure); ev->set_tilt(p_tilt); Input::get_singleton()->parse_input_event(ev); } break; case AMOTION_EVENT_ACTION_SCROLL: { Ref<InputEventMouseButton> ev; ev.instantiate(); _set_key_modifier_state(ev, Key::NONE); if (p_source_mouse_relative) { ev->set_position(hover_prev_pos); ev->set_global_position(hover_prev_pos); } else { ev->set_position(p_event_pos); ev->set_global_position(p_event_pos); } ev->set_pressed(true); buttons_state = event_buttons_mask; if (p_delta.y > 0) {
_wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_UP, p_delta.y)
; } else if (p_delta.y < 0) { _wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_DOWN, -p_delta.y); } if (p_delta.x > 0) { _wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_RIGHT, p_delta.x); } else if (p_delta.x < 0) { _wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_LEFT, -p_delta.x); } } break; } } void AndroidInputHandler::_wheel_button_click(BitField<MouseButtonMask> event_buttons_mask, const Ref<InputEventMouseButton> &ev, MouseButton wheel_button, float factor) { Ref<InputEventMouseButton> evd = ev->duplicate(); _set_key_modifier_state(evd, Key::NONE); evd->set_button_index(wheel_button); evd->set_button_mask(e
ast_based
<fim_prefix>ng(rect_width_); tsv_str += "\t" + std::to_string(rect_height_); tsv_str += "\t-1\t\n"; const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator()); while (!res_it->Empty(RIL_BLOCK)) { if (res_it->Empty(RIL_WORD)) { res_it->Next(RIL_WORD); continue; } // Add rows for any new block/paragraph/textline. if (res_it->IsAtBeginningOf(RIL_BLOCK)) { block_num++; par_num = 0; line_num = 0; word_num = 0; tsv_str += "2\t" + std::to_string(page_num); // level 2 - block tsv_str += "\t" + std::to_string(block_num); tsv_str += "\t" + std::to_string(par_num); tsv_str += "\t" + std::to_string(line_num); <fim_suffix>; AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str); tsv_str += "\t-1\t\n"; // end of row for block } if (res_it->IsAtBeginningOf(RIL_PARA)) { par_num++; line_num = 0; word_num = 0; tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph tsv_str += "\t" + std::to_string(block_num); tsv_str += "\t" + std::to_string(par_num); tsv_str += "\t" + std::to_string(line_num); tsv_str += "\t" + std::to_string(word_num); AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str); tsv_str += "\t-1\t\n"; // end of row for para } if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) { line_num++; word_num = 0; tsv_str<fim_middle>tsv_str += "\t" + std::to_string(word_num)<fim_end>
ng(rect_width_); tsv_str += "\t" + std::to_string(rect_height_); tsv_str += "\t-1\t\n"; const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator()); while (!res_it->Empty(RIL_BLOCK)) { if (res_it->Empty(RIL_WORD)) { res_it->Next(RIL_WORD); continue; } // Add rows for any new block/paragraph/textline. if (res_it->IsAtBeginningOf(RIL_BLOCK)) { block_num++; par_num = 0; line_num = 0; word_num = 0; tsv_str += "2\t" + std::to_string(page_num); // level 2 - block tsv_str += "\t" + std::to_string(block_num); tsv_str += "\t" + std::to_string(par_num); tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num)
; AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str); tsv_str += "\t-1\t\n"; // end of row for block } if (res_it->IsAtBeginningOf(RIL_PARA)) { par_num++; line_num = 0; word_num = 0; tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph tsv_str += "\t" + std::to_string(block_num); tsv_str += "\t" + std::to_string(par_num); tsv_str += "\t" + std::to_string(line_num); tsv_str += "\t" + std::to_string(word_num); AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str); tsv_str += "\t-1\t\n"; // end of row for para } if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) { line_num++; word_num = 0; tsv_str
ast_based
<fim_prefix> if (p_ofs_valid) { if (editor->snap_keys->is_pressed() && editor->step->get_value() != 0) { insert_pos = editor->snap_time(insert_pos); } } float dst_time = key.time + insert_pos; int existing_idx = animation->track_find_key(selected_track, dst_time, Animation::FIND_MODE_APPROX); Variant value = key.value; if (key.track_type != Animation::TYPE_BEZIER) { value = animation->make_default_bezier_key(key.value); } undo_redo->add_do_method(animation.ptr(), "track_insert_key", selected_track, dst_time, value, key.transition); undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", selected_track, dst_time); <fim_suffix> if (existing_idx != -1) { undo_redo->add_undo_method(animation.ptr(), "track_insert_key", selected_track, dst_time, animation->track_get_key_value(selected_track, existing_idx), animation->track_get_key_transition(selected_track, existing_idx)); } } undo_redo->add_do_method(this, "_clear_selection_for_anim", animation); undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation); // Reselect pasted. int i = 0; for (const Pair<int, float> &E : new_selection_values) { undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0); i++; } i = 0; for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {<fim_middle> Pair<int, float> p; p.first = selected_track; p.second = dst_time; new_selection_values.push_back(p);<fim_end>
if (p_ofs_valid) { if (editor->snap_keys->is_pressed() && editor->step->get_value() != 0) { insert_pos = editor->snap_time(insert_pos); } } float dst_time = key.time + insert_pos; int existing_idx = animation->track_find_key(selected_track, dst_time, Animation::FIND_MODE_APPROX); Variant value = key.value; if (key.track_type != Animation::TYPE_BEZIER) { value = animation->make_default_bezier_key(key.value); } undo_redo->add_do_method(animation.ptr(), "track_insert_key", selected_track, dst_time, value, key.transition); undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", selected_track, dst_time);
Pair<int, float> p; p.first = selected_track; p.second = dst_time; new_selection_values.push_back(p);
if (existing_idx != -1) { undo_redo->add_undo_method(animation.ptr(), "track_insert_key", selected_track, dst_time, animation->track_get_key_value(selected_track, existing_idx), animation->track_get_key_transition(selected_track, existing_idx)); } } undo_redo->add_do_method(this, "_clear_selection_for_anim", animation); undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation); // Reselect pasted. int i = 0; for (const Pair<int, float> &E : new_selection_values) { undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0); i++; } i = 0; for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
random
<fim_prefix> LOG_INF("%s: n_ctx: %u\n", __func__, params->n_ctx); LOG_INF("%s: n_embd: %u\n", __func__, params->n_embd); LOG_INF("%s: n_mult: %u\n", __func__, params->n_mult); LOG_INF("%s: n_head: %u\n", __func__, params->n_head); LOG_INF("%s: n_head_kv: %u\n", __func__, params->n_head_kv); LOG_INF("%s: n_ff: %u\n", __func__, params->n_ff); LOG_INF("%s: n_layer: %u\n", __func__, params->n_layer); LOG_INF("%s: n_rot: %u\n", __func__, params->n_rot); } static void print_tensor_info(const struct ggml_context * ctx) { for (auto * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {<fim_suffix> if (i > 0) { LOG_INF("x "); } LOG_INF("[%" PRId64 "] ", t->ne[i]); total *= t->ne[i]; } if (i > 1) { LOG_INF("= [%" PRId64 "] ", total); } LOG_INF("float space for %s\n", ggml_get_name(t)); } } static void init_model(struct my_llama_model * model) { const auto & hparams = model->hparams; const uint32_t n_embd = hparams.n_embd; const uint32_t n_layer = hparams.n_layer; const uint32_t n_vocab = hparams.n_vocab; const uint32_t n_multiqueries = hparams.n_head_kv <= 0 || hparams.n_head_kv >= hparams.n_head ? 1 : hparams.n_head / hparams.n_head_kv; const uint32_t n_ff = hparams.n_ff;<fim_middle> LOG_INF("%s: Allocating ", __func__); int64_t total = 1; int i = 0; for (; i < ggml_n_dims(t); ++i) {<fim_end>
LOG_INF("%s: n_ctx: %u\n", __func__, params->n_ctx); LOG_INF("%s: n_embd: %u\n", __func__, params->n_embd); LOG_INF("%s: n_mult: %u\n", __func__, params->n_mult); LOG_INF("%s: n_head: %u\n", __func__, params->n_head); LOG_INF("%s: n_head_kv: %u\n", __func__, params->n_head_kv); LOG_INF("%s: n_ff: %u\n", __func__, params->n_ff); LOG_INF("%s: n_layer: %u\n", __func__, params->n_layer); LOG_INF("%s: n_rot: %u\n", __func__, params->n_rot); } static void print_tensor_info(const struct ggml_context * ctx) { for (auto * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
LOG_INF("%s: Allocating ", __func__); int64_t total = 1; int i = 0; for (; i < ggml_n_dims(t); ++i) {
if (i > 0) { LOG_INF("x "); } LOG_INF("[%" PRId64 "] ", t->ne[i]); total *= t->ne[i]; } if (i > 1) { LOG_INF("= [%" PRId64 "] ", total); } LOG_INF("float space for %s\n", ggml_get_name(t)); } } static void init_model(struct my_llama_model * model) { const auto & hparams = model->hparams; const uint32_t n_embd = hparams.n_embd; const uint32_t n_layer = hparams.n_layer; const uint32_t n_vocab = hparams.n_vocab; const uint32_t n_multiqueries = hparams.n_head_kv <= 0 || hparams.n_head_kv >= hparams.n_head ? 1 : hparams.n_head / hparams.n_head_kv; const uint32_t n_ff = hparams.n_ff;
random
<fim_prefix>ntSession(scope, "") {} ClientSession::ClientSession(const Scope& scope, const SessionOptions& session_options) { Session* new_session; absl::Status status = NewSession(session_options, &new_session); TF_CHECK_OK(status) << status; impl_.reset(new Impl(new_session, scope.graph_as_shared_ptr())); CHECK_NOTNULL(impl()->session_.get()); } // Define destructor here so we can forward declare `Impl` in client_session.h. // If we define a dtor in the header file or use the default dtor, // unique_ptr<Impl> needs the complete type. ClientSession::~ClientSession() {} SessionOptions ClientSession::Impl::MakeDefaultSessionOptions( const string& target) { <fim_suffix> return options; } absl::Status ClientSession::Run(const std::vector<Output>& fetch_outputs, std::vector<Tensor>* outputs) const { return Run(FeedType{}, fetch_outputs, {}, outputs); } absl::Status ClientSession::Run(const FeedType& inputs, const std::vector<Output>& fetch_outputs, std::vector<Tensor>* outputs) const { return Run(inputs, fetch_outputs, {}, outputs); } absl::Status ClientSession::Run(const FeedType& inputs, const std::vector<Output>& fetch_outputs, const std::vector<Operation>& run_outputs, <fim_middle>SessionOptions options; options.env = Env::Default(); options.target = target;<fim_end>
ntSession(scope, "") {} ClientSession::ClientSession(const Scope& scope, const SessionOptions& session_options) { Session* new_session; absl::Status status = NewSession(session_options, &new_session); TF_CHECK_OK(status) << status; impl_.reset(new Impl(new_session, scope.graph_as_shared_ptr())); CHECK_NOTNULL(impl()->session_.get()); } // Define destructor here so we can forward declare `Impl` in client_session.h. // If we define a dtor in the header file or use the default dtor, // unique_ptr<Impl> needs the complete type. ClientSession::~ClientSession() {} SessionOptions ClientSession::Impl::MakeDefaultSessionOptions( const string& target) {
SessionOptions options; options.env = Env::Default(); options.target = target;
return options; } absl::Status ClientSession::Run(const std::vector<Output>& fetch_outputs, std::vector<Tensor>* outputs) const { return Run(FeedType{}, fetch_outputs, {}, outputs); } absl::Status ClientSession::Run(const FeedType& inputs, const std::vector<Output>& fetch_outputs, std::vector<Tensor>* outputs) const { return Run(inputs, fetch_outputs, {}, outputs); } absl::Status ClientSession::Run(const FeedType& inputs, const std::vector<Output>& fetch_outputs, const std::vector<Operation>& run_outputs,
ast_based
<fim_prefix>#pragma once #include <filesystem> #include <stdexcept> #include <string> #include <utility> namespace fs = std::filesystem; class Dlhandle { void *chandle = nullptr; public: class Exception : public std::runtime_error { public: using std::runtime_error::runtime_error; }; Dlhandle() = default; Dlhandle(const fs::path &fpath);<fim_suffix> Dlhandle(Dlhandle &&o) : chandle(o.chandle) { o.chandle = nullptr; } ~Dlhandle(); Dlhandle &operator=(Dlhandle &&o) { chandle = std::exchange(o.chandle, nullptr); return *this; } template <typename T> T *get(const std::string &symbol) const { return reinterpret_cast<T *>(get_internal(symbol.c_str())); } auto get_fnc(const std::string &symbol) const { return get<void*(...)>(symbol); } private: void *get_internal(const char *symbol) const; }; <fim_middle> Dlhandle(const Dlhandle &o) = delete;<fim_end>
#pragma once #include <filesystem> #include <stdexcept> #include <string> #include <utility> namespace fs = std::filesystem; class Dlhandle { void *chandle = nullptr; public: class Exception : public std::runtime_error { public: using std::runtime_error::runtime_error; }; Dlhandle() = default; Dlhandle(const fs::path &fpath);
Dlhandle(const Dlhandle &o) = delete;
Dlhandle(Dlhandle &&o) : chandle(o.chandle) { o.chandle = nullptr; } ~Dlhandle(); Dlhandle &operator=(Dlhandle &&o) { chandle = std::exchange(o.chandle, nullptr); return *this; } template <typename T> T *get(const std::string &symbol) const { return reinterpret_cast<T *>(get_internal(symbol.c_str())); } auto get_fnc(const std::string &symbol) const { return get<void*(...)>(symbol); } private: void *get_internal(const char *symbol) const; };
random
<fim_prefix>n, frame); } if (t.is_null()) { return Rect2(); } Size2 s = t->get_size(); Point2 ofs = offset; if (centered) { ofs -= s / 2; } if (s == Size2(0, 0)) { s = Size2(1, 1); } return Rect2(ofs, s); } void AnimatedSprite2D::_validate_property(PropertyInfo &p_property) const { if (frames.is_null()) { return; } if (!Engine::get_singleton()->is_editor_hint()) { if (p_property.name == "frame" && playing) { p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY; } return; } if (p_property.name == "animation") { List<StringName> names; frames->get_animation_list(&names); names.sort_custom<StringName::AlphCompare>(); bool current_found = false; <fim_suffix> for (const StringName &E : names) { if (!is_first_element) { p_property.hint_string += ","; } else { is_first_element = false; } p_property.hint_string += String(E); if (animation == E) { current_found = true; } } if (!current_found) { if (p_property.hint_string.is_empty()) { p_property.hint_string = String(animation); } else { p_property.hint_string = String(animation) + "," + p_property.hint_string; } } return; } if (p_property.name == "frame") { if (playing) { p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY; return; } p_property.hint = PROPERTY_HINT_RANGE; if (frames->has_animation(animation<fim_middle>bool is_first_element = true;<fim_end>
n, frame); } if (t.is_null()) { return Rect2(); } Size2 s = t->get_size(); Point2 ofs = offset; if (centered) { ofs -= s / 2; } if (s == Size2(0, 0)) { s = Size2(1, 1); } return Rect2(ofs, s); } void AnimatedSprite2D::_validate_property(PropertyInfo &p_property) const { if (frames.is_null()) { return; } if (!Engine::get_singleton()->is_editor_hint()) { if (p_property.name == "frame" && playing) { p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY; } return; } if (p_property.name == "animation") { List<StringName> names; frames->get_animation_list(&names); names.sort_custom<StringName::AlphCompare>(); bool current_found = false;
bool is_first_element = true;
for (const StringName &E : names) { if (!is_first_element) { p_property.hint_string += ","; } else { is_first_element = false; } p_property.hint_string += String(E); if (animation == E) { current_found = true; } } if (!current_found) { if (p_property.hint_string.is_empty()) { p_property.hint_string = String(animation); } else { p_property.hint_string = String(animation) + "," + p_property.hint_string; } } return; } if (p_property.name == "frame") { if (playing) { p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY; return; } p_property.hint = PROPERTY_HINT_RANGE; if (frames->has_animation(animation
ast_based
<fim_prefix> Run(grpc_completion_queue_functor* cb, int) { auto* callback = static_cast<ShutdownCallback*>(cb); delete callback->cq_; delete callback; } private: grpc::CompletionQueue* cq_ = nullptr; }; } // namespace ::grpc::CompletionQueue* Channel::CallbackCQ() { // TODO(vjpai): Consider using a single global CQ for the default CQ // if there is no explicit per-channel CQ registered CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_acquire); if (callback_cq != nullptr) { return callback_cq; } // The callback_cq_ wasn't already set, so grab a lock and set it up exactly // once for this channel. grpc::internal::MutexLock l(&mu_); callback_cq = <fim_suffix>; if (callback_cq == nullptr) { if (grpc_iomgr_run_in_background()) { // gRPC-core provides the backing needed for the preferred CQ type auto* shutdown_callback = new ShutdownCallback; callback_cq = new grpc::CompletionQueue(grpc_completion_queue_attributes{ GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING, shutdown_callback}); // Transfer ownership of the new cq to its own shutdown callback shutdown_callback->TakeCQ(callback_cq); } else { // Otherwise we need to use the alternative CQ variant callback_cq = CompletionQueue::CallbackAlternativeCQ(); } callback_cq_.store(callback_cq, std::memory<fim_middle>callback_cq_.load(std::memory_order_relaxed)<fim_end>
Run(grpc_completion_queue_functor* cb, int) { auto* callback = static_cast<ShutdownCallback*>(cb); delete callback->cq_; delete callback; } private: grpc::CompletionQueue* cq_ = nullptr; }; } // namespace ::grpc::CompletionQueue* Channel::CallbackCQ() { // TODO(vjpai): Consider using a single global CQ for the default CQ // if there is no explicit per-channel CQ registered CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_acquire); if (callback_cq != nullptr) { return callback_cq; } // The callback_cq_ wasn't already set, so grab a lock and set it up exactly // once for this channel. grpc::internal::MutexLock l(&mu_); callback_cq =
callback_cq_.load(std::memory_order_relaxed)
; if (callback_cq == nullptr) { if (grpc_iomgr_run_in_background()) { // gRPC-core provides the backing needed for the preferred CQ type auto* shutdown_callback = new ShutdownCallback; callback_cq = new grpc::CompletionQueue(grpc_completion_queue_attributes{ GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING, shutdown_callback}); // Transfer ownership of the new cq to its own shutdown callback shutdown_callback->TakeCQ(callback_cq); } else { // Otherwise we need to use the alternative CQ variant callback_cq = CompletionQueue::CallbackAlternativeCQ(); } callback_cq_.store(callback_cq, std::memory
ast_based
<fim_prefix>// stdin. We'll still do our best if the user likes pipes. bool TessBaseAPI::ProcessPagesInternal(const char *filename, const char *retry_config, int timeout_millisec, TessResultRenderer *renderer) { bool stdInput = !strcmp(filename, "stdin") || !strcmp(filename, "-"); if (stdInput) { #ifdef WIN32 if (_setmode(_fileno(stdin), _O_BINARY) == -1) tprintf("ERROR: cin to binary: %s", strerror(errno)); #endif // WIN32 } if (stream_filelist) { return ProcessPagesFileList(stdin, nullptr, retry_config, timeout_millisec, renderer, tesseract_->tessedit_page_number); } <fim_suffix> // That means any data in stdin must be buffered, to make it // seekable. std::string buf; const l_uint8 *data = nullptr; if (stdInput) { buf.assign((std::istreambuf_iterator<char>(std::cin)), (std::istreambuf_iterator<char>())); data = reinterpret_cast<const l_uint8 *>(buf.data()); } else if (strstr(filename, "://") != nullptr) { // Get image or image list by URL. #ifdef HAVE_LIBCURL CURL *curl = curl_easy_init(); if (curl == nullptr) { fprintf(stderr, "Error, curl_easy_init failed\n"); return false; } else { CURLcode curlcode; auto error = [curl, &curlcode](const char *function) {<fim_middle> // At this point we are officially in autodection territory.<fim_end>
// stdin. We'll still do our best if the user likes pipes. bool TessBaseAPI::ProcessPagesInternal(const char *filename, const char *retry_config, int timeout_millisec, TessResultRenderer *renderer) { bool stdInput = !strcmp(filename, "stdin") || !strcmp(filename, "-"); if (stdInput) { #ifdef WIN32 if (_setmode(_fileno(stdin), _O_BINARY) == -1) tprintf("ERROR: cin to binary: %s", strerror(errno)); #endif // WIN32 } if (stream_filelist) { return ProcessPagesFileList(stdin, nullptr, retry_config, timeout_millisec, renderer, tesseract_->tessedit_page_number); }
// At this point we are officially in autodection territory.
// That means any data in stdin must be buffered, to make it // seekable. std::string buf; const l_uint8 *data = nullptr; if (stdInput) { buf.assign((std::istreambuf_iterator<char>(std::cin)), (std::istreambuf_iterator<char>())); data = reinterpret_cast<const l_uint8 *>(buf.data()); } else if (strstr(filename, "://") != nullptr) { // Get image or image list by URL. #ifdef HAVE_LIBCURL CURL *curl = curl_easy_init(); if (curl == nullptr) { fprintf(stderr, "Error, curl_easy_init failed\n"); return false; } else { CURLcode curlcode; auto error = [curl, &curlcode](const char *function) {
random
<fim_prefix>/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */ /* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ /* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ /* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ #include "engine.h" #include "core/authors.gen.h" #include "core/config/project_settings.h" #include "core/donors.gen.h" #include "core/license.gen.h" #include "core/variant/typed_array.h"<fim_suffix>#include "servers/rendering/rendering_device.h" void Engine::set_physics_ticks_per_second(int p_ips) { ERR_FAIL_COND_MSG(p_ips <= 0, "Engine iterations per second must be greater than 0."); ips = p_ips; } int Engine::get_physics_ticks_per_second() const { return ips; } void Engine::set_max_physics_steps_per_frame(int p_max_physics_steps) { ERR_FAIL_COND_MSG(p_max_physics_steps <= 0, "Maximum number of physics steps per frame must be greater than 0."); max_physics_steps_per_frame = p_max_physics_steps; } int Engine::get_max_physics_steps_per_frame() const { return max_physics_steps_per_frame; } void Engine::set_physics_jitter_fix(double p_threshold) { if (p_threshold < 0) {<fim_middle>#include "core/version.h"<fim_end>
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */ /* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ /* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ /* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ #include "engine.h" #include "core/authors.gen.h" #include "core/config/project_settings.h" #include "core/donors.gen.h" #include "core/license.gen.h" #include "core/variant/typed_array.h"
#include "core/version.h"
#include "servers/rendering/rendering_device.h" void Engine::set_physics_ticks_per_second(int p_ips) { ERR_FAIL_COND_MSG(p_ips <= 0, "Engine iterations per second must be greater than 0."); ips = p_ips; } int Engine::get_physics_ticks_per_second() const { return ips; } void Engine::set_max_physics_steps_per_frame(int p_max_physics_steps) { ERR_FAIL_COND_MSG(p_max_physics_steps <= 0, "Maximum number of physics steps per frame must be greater than 0."); max_physics_steps_per_frame = p_max_physics_steps; } int Engine::get_max_physics_steps_per_frame() const { return max_physics_steps_per_frame; } void Engine::set_physics_jitter_fix(double p_threshold) { if (p_threshold < 0) {
random
<fim_prefix> if (!block_it.data()->pdblk.poly_block()->IsText()) { continue; } ++num_blocks; } if (!num_blocks) { tprintf("WARNING: Found no blocks\n"); return; } *block_orientation = new int[num_blocks]; *vertical_writing = new bool[num_blocks]; block_it.move_to_first(); int i = 0; for (block_it.mark_cycle_pt(); !block_it.cycled_list(); block_it.forward()) { if (!block_it.data()->pdblk.poly_block()->IsText()) { continue; } FCOORD re_rotation = block_it.data()->re_rotation(); float re_theta = re_rotation.angle(); FCOORD classify_rotation = block_it.data()->classify_rotation(); float classify_theta = classify_rotation.angle();<fim_suffix> } int num_rotations = static_cast<int>(rot_theta + 0.5); (*block_orientation)[i] = num_rotations; // The classify_rotation is non-zero only if the text has vertical // writing direction. (*vertical_writing)[i] = classify_rotation.y() != 0.0f; ++i; } } void TessBaseAPI::DetectParagraphs(bool after_text_recognition) { int debug_level = 0; GetIntVariable("paragraph_debug_level", &debug_level); if (paragraph_models_ == nullptr) { paragraph_models_ = new std::vector<ParagraphModel *>; } MutableIterator *result_it = GetMutableIterator(); do { // Detect paragraphs for this block std::vector<ParagraphModel *> models;<fim_middle> double rot_theta = -(re_theta - classify_theta) * 2.0 / M_PI; if (rot_theta < 0) { rot_theta += 4;<fim_end>
if (!block_it.data()->pdblk.poly_block()->IsText()) { continue; } ++num_blocks; } if (!num_blocks) { tprintf("WARNING: Found no blocks\n"); return; } *block_orientation = new int[num_blocks]; *vertical_writing = new bool[num_blocks]; block_it.move_to_first(); int i = 0; for (block_it.mark_cycle_pt(); !block_it.cycled_list(); block_it.forward()) { if (!block_it.data()->pdblk.poly_block()->IsText()) { continue; } FCOORD re_rotation = block_it.data()->re_rotation(); float re_theta = re_rotation.angle(); FCOORD classify_rotation = block_it.data()->classify_rotation(); float classify_theta = classify_rotation.angle();
double rot_theta = -(re_theta - classify_theta) * 2.0 / M_PI; if (rot_theta < 0) { rot_theta += 4;
} int num_rotations = static_cast<int>(rot_theta + 0.5); (*block_orientation)[i] = num_rotations; // The classify_rotation is non-zero only if the text has vertical // writing direction. (*vertical_writing)[i] = classify_rotation.y() != 0.0f; ++i; } } void TessBaseAPI::DetectParagraphs(bool after_text_recognition) { int debug_level = 0; GetIntVariable("paragraph_debug_level", &debug_level); if (paragraph_models_ == nullptr) { paragraph_models_ = new std::vector<ParagraphModel *>; } MutableIterator *result_it = GetMutableIterator(); do { // Detect paragraphs for this block std::vector<ParagraphModel *> models;
random
<fim_prefix>x)); } } undo_redo->add_do_method(this, "_clear_selection_for_anim", animation); undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation); // Reselect pasted. int i = 0; for (const Pair<int, float> &E : new_selection_values) { undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0); i++; } i = 0; for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) { undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, animation->track_get_key_time(E->get().first, E->get().second), i == 0); i++; } AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton(); if (ape) { <fim_suffix>; undo_redo->add_undo_method(ape, "_animation_update_key_frame"); } undo_redo->add_do_method(this, "queue_redraw"); undo_redo->add_undo_method(this, "queue_redraw"); undo_redo->commit_action(); } } void AnimationBezierTrackEdit::delete_selection() { if (selection.size()) { EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton(); undo_redo->create_action(TTR("Animation Delete Keys")); for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) { undo_redo->add_do_method(animation.ptr(), "track_remove_key", E->get().first, E->get().second); undo_redo->add_undo_method(animation.ptr(), "track_insert_key", E->get().first, animation->track_get_<fim_middle>undo_redo->add_do_method(ape, "_animation_update_key_frame")<fim_end>
x)); } } undo_redo->add_do_method(this, "_clear_selection_for_anim", animation); undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation); // Reselect pasted. int i = 0; for (const Pair<int, float> &E : new_selection_values) { undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0); i++; } i = 0; for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) { undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, animation->track_get_key_time(E->get().first, E->get().second), i == 0); i++; } AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton(); if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame")
; undo_redo->add_undo_method(ape, "_animation_update_key_frame"); } undo_redo->add_do_method(this, "queue_redraw"); undo_redo->add_undo_method(this, "queue_redraw"); undo_redo->commit_action(); } } void AnimationBezierTrackEdit::delete_selection() { if (selection.size()) { EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton(); undo_redo->create_action(TTR("Animation Delete Keys")); for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) { undo_redo->add_do_method(animation.ptr(), "track_remove_key", E->get().first, E->get().second); undo_redo->add_undo_method(animation.ptr(), "track_insert_key", E->get().first, animation->track_get_
ast_based
<fim_prefix>lic std::runtime_error { public: using std::runtime_error::runtime_error; }; class UnsupportedModelError: public std::runtime_error { public: using std::runtime_error::runtime_error; }; struct GPUDevice { const char *backend; int index; int type; size_t heapSize; std::string name; std::string vendor; GPUDevice(const char *backend, int index, int type, size_t heapSize, std::string name, std::string vendor): backend(backend), index(index), type(type), heapSize(heapSize), name(std::move(name)), vendor(std::move(vendor)) {} std::string selectionName() const <fim_suffix> std::string backendName() const { return backendIdToName(backend); } static std::string backendIdToName(const std::string &backend) { return s_backendNames.at(backend); } static std::string updateSelectionName(const std::string &name) { if (name == "Auto" || name == "CPU" || name == "Metal") return name; auto it = std::find_if(s_backendNames.begin(), s_backendNames.end(), [&name](const auto &entry) { return name.starts_with(entry.second + ": "); }); if (it != s_backendNames.end()) return name; return "Vulkan: " + name; // previously, there were only Vulkan d<fim_middle>{ assert(backend == "cuda"s || backend == "kompute"s); return backendName() + ": " + name; }<fim_end>
lic std::runtime_error { public: using std::runtime_error::runtime_error; }; class UnsupportedModelError: public std::runtime_error { public: using std::runtime_error::runtime_error; }; struct GPUDevice { const char *backend; int index; int type; size_t heapSize; std::string name; std::string vendor; GPUDevice(const char *backend, int index, int type, size_t heapSize, std::string name, std::string vendor): backend(backend), index(index), type(type), heapSize(heapSize), name(std::move(name)), vendor(std::move(vendor)) {} std::string selectionName() const
{ assert(backend == "cuda"s || backend == "kompute"s); return backendName() + ": " + name; }
std::string backendName() const { return backendIdToName(backend); } static std::string backendIdToName(const std::string &backend) { return s_backendNames.at(backend); } static std::string updateSelectionName(const std::string &name) { if (name == "Auto" || name == "CPU" || name == "Metal") return name; auto it = std::find_if(s_backendNames.begin(), s_backendNames.end(), [&name](const auto &entry) { return name.starts_with(entry.second + ": "); }); if (it != s_backendNames.end()) return name; return "Vulkan: " + name; // previously, there were only Vulkan d
ast_based
<fim_prefix>/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ #ifdef ACCESSKIT_ENABLED #include "accessibility_driver_accesskit.h" #include "core/config/project_settings.h" #include "core/version.h" #include "servers/text_server.h" AccessibilityDriverAccessKit *AccessibilityDriverAccessKit::singleton = nullptr; _FORCE_INLINE_ accesskit_role AccessibilityDriverAccessKit::_accessibility_role(DisplayServer::AccessibilityRole p_role) const { if (role_map.has(p_role)) { return role_map[p_role]; } return ACCESSKIT_ROLE_UNKNOWN; }<fim_suffix> if (action_map.has(p_action)) { return action_map[p_action]; } return ACCESSKIT_ACTION_CLICK; } bool AccessibilityDriverAccessKit::window_create(DisplayServer::WindowID p_window_id, void *p_handle) { ERR_FAIL_COND_V(windows.has(p_window_id), false); WindowData &wd = windows[p_window_id]; AccessibilityElement *ae = memnew(AccessibilityElement); ae->role = ACCESSKIT_ROLE_WINDOW; ae->window_id = p_window_id; wd.root_id = rid_owner.make_rid(ae); #ifdef WINDOWS_ENABLED<fim_middle> _FORCE_INLINE_ accesskit_action AccessibilityDriverAccessKit::_accessibility_action(DisplayServer::AccessibilityAction p_action) const {<fim_end>
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /**************************************************************************/ #ifdef ACCESSKIT_ENABLED #include "accessibility_driver_accesskit.h" #include "core/config/project_settings.h" #include "core/version.h" #include "servers/text_server.h" AccessibilityDriverAccessKit *AccessibilityDriverAccessKit::singleton = nullptr; _FORCE_INLINE_ accesskit_role AccessibilityDriverAccessKit::_accessibility_role(DisplayServer::AccessibilityRole p_role) const { if (role_map.has(p_role)) { return role_map[p_role]; } return ACCESSKIT_ROLE_UNKNOWN; }
_FORCE_INLINE_ accesskit_action AccessibilityDriverAccessKit::_accessibility_action(DisplayServer::AccessibilityAction p_action) const {
if (action_map.has(p_action)) { return action_map[p_action]; } return ACCESSKIT_ACTION_CLICK; } bool AccessibilityDriverAccessKit::window_create(DisplayServer::WindowID p_window_id, void *p_handle) { ERR_FAIL_COND_V(windows.has(p_window_id), false); WindowData &wd = windows[p_window_id]; AccessibilityElement *ae = memnew(AccessibilityElement); ae->role = ACCESSKIT_ROLE_WINDOW; ae->window_id = p_window_id; wd.root_id = rid_owner.make_rid(ae); #ifdef WINDOWS_ENABLED
random
<fim_prefix>_rect.position + Vector2(-inner_ofs, selection_rect.size.height + inner_ofs), accent, limit, right_limit); _draw_line_clipped(selection_rect.position + Vector2(selection_rect.size.width + inner_ofs, -inner_ofs), selection_rect.position + selection_rect.size + Vector2(inner_ofs, inner_ofs), accent, limit, right_limit); } selection_handles_rect.position = selection_rect.position - Vector2(outer_ofs, outer_ofs); selection_handles_rect.size = selection_rect.size + Vector2(outer_ofs, outer_ofs) * 2; } if (box_selecting) { Vector2 bs_from = box_selection_from; Vector2 bs_to = box_selection_to; if (bs_from.x > bs_to.x) { SWAP(bs_from.x, bs_to.x); } <fim_suffix> draw_rect( Rect2(bs_from, bs_to - bs_from), get_theme_color(SNAME("box_selection_fill_color"), EditorStringName(Editor))); draw_rect( Rect2(bs_from, bs_to - bs_from), get_theme_color(SNAME("box_selection_stroke_color"), EditorStringName(Editor)), false, Math::round(EDSCALE)); } } break; } } // Check if a track is displayed in the bezier editor (track type = bezier and track not filtered). bool AnimationBezierTrackEdit::_is_track_displayed(int p_track_index) { if (animation->track_get_type(p_track_index) != Animation::TrackType::TYPE_BEZIER) { return false; } if (is_filtered) { String path = String(animation->track_get_path(p_track_i<fim_middle>if (bs_from.y > bs_to.y) { SWAP(bs_from.y, bs_to.y); }<fim_end>
_rect.position + Vector2(-inner_ofs, selection_rect.size.height + inner_ofs), accent, limit, right_limit); _draw_line_clipped(selection_rect.position + Vector2(selection_rect.size.width + inner_ofs, -inner_ofs), selection_rect.position + selection_rect.size + Vector2(inner_ofs, inner_ofs), accent, limit, right_limit); } selection_handles_rect.position = selection_rect.position - Vector2(outer_ofs, outer_ofs); selection_handles_rect.size = selection_rect.size + Vector2(outer_ofs, outer_ofs) * 2; } if (box_selecting) { Vector2 bs_from = box_selection_from; Vector2 bs_to = box_selection_to; if (bs_from.x > bs_to.x) { SWAP(bs_from.x, bs_to.x); }
if (bs_from.y > bs_to.y) { SWAP(bs_from.y, bs_to.y); }
draw_rect( Rect2(bs_from, bs_to - bs_from), get_theme_color(SNAME("box_selection_fill_color"), EditorStringName(Editor))); draw_rect( Rect2(bs_from, bs_to - bs_from), get_theme_color(SNAME("box_selection_stroke_color"), EditorStringName(Editor)), false, Math::round(EDSCALE)); } } break; } } // Check if a track is displayed in the bezier editor (track type = bezier and track not filtered). bool AnimationBezierTrackEdit::_is_track_displayed(int p_track_index) { if (animation->track_get_type(p_track_index) != Animation::TrackType::TYPE_BEZIER) { return false; } if (is_filtered) { String path = String(animation->track_get_path(p_track_i
ast_based
<fim_prefix>(0, SEEK_SET); } } size_t tell() const { #ifdef _WIN32 __int64 ret = _ftelli64(fp); #else long ret = std::ftell(fp); #endif GGML_ASSERT(ret != -1); // this really shouldn't fail return (size_t) ret; } void seek(size_t offset, int whence) { #ifdef _WIN32 int ret = _fseeki64(fp, (__int64) offset, whence); #else int ret = std::fseek(fp, (long) offset, whence); #endif GGML_ASSERT(ret == 0); // same } void read_raw(void * ptr, size_t size) { if (size == 0) { return; } errno = 0; std::size_t ret = std::fread(ptr, size, 1, fp); if (ferror(fp)) { <fim_suffix>; } if (ret != 1) { die("unexpectedly reached end of file"); } } std::uint32_t read_u32() { std::uint32_t ret; read_raw(&ret, sizeof(ret)); return ret; } std::float_t read_f32() { std::float_t ret; read_raw(&ret, sizeof(ret)); return ret; } std::string read_string(std::uint32_t len) { std::vector<char> chars(len); read_raw(chars.data(), len); return std::string(chars.data(), len); } ~my_llama_file() { if (fp) { std::fclose(fp); } } }; static bool is_ggml_file(const char * filename) { my_llama_file file(filename, "r<fim_middle>die_fmt("fread failed: %s", strerror(errno))<fim_end>
(0, SEEK_SET); } } size_t tell() const { #ifdef _WIN32 __int64 ret = _ftelli64(fp); #else long ret = std::ftell(fp); #endif GGML_ASSERT(ret != -1); // this really shouldn't fail return (size_t) ret; } void seek(size_t offset, int whence) { #ifdef _WIN32 int ret = _fseeki64(fp, (__int64) offset, whence); #else int ret = std::fseek(fp, (long) offset, whence); #endif GGML_ASSERT(ret == 0); // same } void read_raw(void * ptr, size_t size) { if (size == 0) { return; } errno = 0; std::size_t ret = std::fread(ptr, size, 1, fp); if (ferror(fp)) {
die_fmt("fread failed: %s", strerror(errno))
; } if (ret != 1) { die("unexpectedly reached end of file"); } } std::uint32_t read_u32() { std::uint32_t ret; read_raw(&ret, sizeof(ret)); return ret; } std::float_t read_f32() { std::float_t ret; read_raw(&ret, sizeof(ret)); return ret; } std::string read_string(std::uint32_t len) { std::vector<char> chars(len); read_raw(chars.data(), len); return std::string(chars.data(), len); } ~my_llama_file() { if (fp) { std::fclose(fp); } } }; static bool is_ggml_file(const char * filename) { my_llama_file file(filename, "r
ast_based
<fim_prefix>oid DeserializeFileDB(const fs::path& path, Data&& data) { FILE* file = fsbridge::fopen(path, "rb"); AutoFile filein{file}; if (filein.IsNull()) { throw DbNotFoundError{}; } DeserializeDB(filein, data); } } // namespace CBanDB::CBanDB(fs::path ban_list_path) : m_banlist_dat(ban_list_path + ".dat"), m_banlist_json(ban_list_path + ".json") { } bool CBanDB::Write(const banmap_t& banSet) { std::vector<std::string> errors; if (common::WriteSettings(m_banlist_json, {{JSON_KEY, BanMapToJson(banSet)}}, errors)) { return true; } for (const auto& err : errors) { LogError("%s\n", err); } return false; } bool CBanDB::Read(<fim_suffix>) { if (fs::exists(m_banlist_dat)) { LogWarning("banlist.dat ignored because it can only be read by " CLIENT_NAME " version 22.x. Remove %s to silence this warning.", fs::quoted(fs::PathToString(m_banlist_dat))); } // If the JSON banlist does not exist, then recreate it if (!fs::exists(m_banlist_json)) { return false; } std::map<std::string, common::SettingsValue> settings; std::vector<std::string> errors; if (!common::ReadSettings(m_banlist_json, settings, errors)) { for (const auto& err : errors) { LogWarning("Cannot load banlist %s: %s", fs::PathToString(m_banlist_json), err); } return false; } <fim_middle>banmap_t& banSet<fim_end>
oid DeserializeFileDB(const fs::path& path, Data&& data) { FILE* file = fsbridge::fopen(path, "rb"); AutoFile filein{file}; if (filein.IsNull()) { throw DbNotFoundError{}; } DeserializeDB(filein, data); } } // namespace CBanDB::CBanDB(fs::path ban_list_path) : m_banlist_dat(ban_list_path + ".dat"), m_banlist_json(ban_list_path + ".json") { } bool CBanDB::Write(const banmap_t& banSet) { std::vector<std::string> errors; if (common::WriteSettings(m_banlist_json, {{JSON_KEY, BanMapToJson(banSet)}}, errors)) { return true; } for (const auto& err : errors) { LogError("%s\n", err); } return false; } bool CBanDB::Read(
banmap_t& banSet
) { if (fs::exists(m_banlist_dat)) { LogWarning("banlist.dat ignored because it can only be read by " CLIENT_NAME " version 22.x. Remove %s to silence this warning.", fs::quoted(fs::PathToString(m_banlist_dat))); } // If the JSON banlist does not exist, then recreate it if (!fs::exists(m_banlist_json)) { return false; } std::map<std::string, common::SettingsValue> settings; std::vector<std::string> errors; if (!common::ReadSettings(m_banlist_json, settings, errors)) { for (const auto& err : errors) { LogWarning("Cannot load banlist %s: %s", fs::PathToString(m_banlist_json), err); } return false; }
ast_based
<fim_prefix> mCalibData->cameraMatrix.at<double>(0,0) = mCalibData->cameraMatrix.at<double>(1,1); } } if(!(mCalibFlags & cv::CALIB_ZERO_TANGENT_DIST)) { const double eps = 0.005; if(fabs(mCalibData->distCoeffs.at<double>(2)) < eps && fabs(mCalibData->distCoeffs.at<double>(3)) < eps) mCalibFlags |= cv::CALIB_ZERO_TANGENT_DIST; } if(!(mCalibFlags & cv::CALIB_FIX_K1)) { const double eps = 0.005; if(fabs(mCalibData->distCoeffs.at<double>(0)) < eps) mCalibFlags |= cv::CALIB_FIX_K1;<fim_suffix> if(fabs(mCalibData->distCoeffs.at<double>(1)) < eps) mCalibFlags |= cv::CALIB_FIX_K2; } if(!(mCalibFlags & cv::CALIB_FIX_K3)) { const double eps = 0.005; if(fabs(mCalibData->distCoeffs.at<double>(4)) < eps) mCalibFlags |= cv::CALIB_FIX_K3; } } } bool calib::calibController::getCommonCalibrationState() const { int rating = (int)getFramesNumberState() + (int)getConfidenceIntrervalsState() + (int)getRMSState() + (int)mCoverageQualityState; return rating == 4; } bool calib::calibController::getFramesNumberState() const {<fim_middle> } if(!(mCalibFlags & cv::CALIB_FIX_K2)) { const double eps = 0.005;<fim_end>
mCalibData->cameraMatrix.at<double>(0,0) = mCalibData->cameraMatrix.at<double>(1,1); } } if(!(mCalibFlags & cv::CALIB_ZERO_TANGENT_DIST)) { const double eps = 0.005; if(fabs(mCalibData->distCoeffs.at<double>(2)) < eps && fabs(mCalibData->distCoeffs.at<double>(3)) < eps) mCalibFlags |= cv::CALIB_ZERO_TANGENT_DIST; } if(!(mCalibFlags & cv::CALIB_FIX_K1)) { const double eps = 0.005; if(fabs(mCalibData->distCoeffs.at<double>(0)) < eps) mCalibFlags |= cv::CALIB_FIX_K1;
} if(!(mCalibFlags & cv::CALIB_FIX_K2)) { const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(1)) < eps) mCalibFlags |= cv::CALIB_FIX_K2; } if(!(mCalibFlags & cv::CALIB_FIX_K3)) { const double eps = 0.005; if(fabs(mCalibData->distCoeffs.at<double>(4)) < eps) mCalibFlags |= cv::CALIB_FIX_K3; } } } bool calib::calibController::getCommonCalibrationState() const { int rating = (int)getFramesNumberState() + (int)getConfidenceIntrervalsState() + (int)getRMSState() + (int)mCoverageQualityState; return rating == 4; } bool calib::calibController::getFramesNumberState() const {
random
<fim_prefix>buft_override { const char * pattern; ggml_backend_buffer_type_t buft; }; struct llama_model_params { // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used) ggml_backend_dev_t * devices; // NULL-terminated list of buffer types to use for tensors that match a pattern const struct llama_model_tensor_buft_override * tensor_buft_overrides; int32_t n_gpu_layers; // number of layers to store in VRAM enum llama_split_mode split_mode; // how to split the model across multiple GPUs // the GPU that is used for the entire model when split_mode is LLAMA_SPLIT_MODE_NONE <fim_suffix> // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices() const float * tensor_split; // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. // If the provided progress_callback returns true, model loading continues. // If it returns false, model loading is immediately aborted. llama_progress_callback progress_callback; // context pointer passed to the progress callback void * progress_callback_user_data; // override key-value pairs of the model meta data const struct llama_model_kv_override * kv_overrides; // Keep the booleans together to avoi<fim_middle>int32_t main_gpu;<fim_end>
buft_override { const char * pattern; ggml_backend_buffer_type_t buft; }; struct llama_model_params { // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used) ggml_backend_dev_t * devices; // NULL-terminated list of buffer types to use for tensors that match a pattern const struct llama_model_tensor_buft_override * tensor_buft_overrides; int32_t n_gpu_layers; // number of layers to store in VRAM enum llama_split_mode split_mode; // how to split the model across multiple GPUs // the GPU that is used for the entire model when split_mode is LLAMA_SPLIT_MODE_NONE
int32_t main_gpu;
// proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices() const float * tensor_split; // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. // If the provided progress_callback returns true, model loading continues. // If it returns false, model loading is immediately aborted. llama_progress_callback progress_callback; // context pointer passed to the progress callback void * progress_callback_user_data; // override key-value pairs of the model meta data const struct llama_model_kv_override * kv_overrides; // Keep the booleans together to avoi
ast_based
<fim_prefix> nullptr, 0, nullptr, nullptr, false, &mgr) == 0) { osd_tess = osd_tesseract_; osd_tesseract_->set_source_resolution(thresholder_->GetSourceYResolution()); } else { tprintf( "Warning: Auto orientation and script detection requested," " but osd language failed to load\n"); delete osd_tesseract_; osd_tesseract_ = nullptr; } } } #endif // ndef DISABLED_LEGACY_ENGINE if (tesseract_->SegmentPage(input_file_.c_str(), block_list_, osd_tess, &osr) < 0) { return -1; } // If Devanagari is being recognized, we use different images for page seg // and for OCR. <fim_suffix>; return 0; } /** * Return average gradient of lines on page. */ float TessBaseAPI::GetGradient() { return tesseract_->gradient(); } /** Delete the pageres and clear the block list ready for a new page. */ void TessBaseAPI::ClearResults() { if (tesseract_ != nullptr) { tesseract_->Clear(); } delete page_res_; page_res_ = nullptr; recognition_done_ = false; if (block_list_ == nullptr) { block_list_ = new BLOCK_LIST; } else { block_list_->clear(); } if (paragraph_models_ != nullptr) { for (auto model : *paragraph_models_) { delete model; } delete paragraph_models_; paragraph_models_ = nullptr; } } /** * Return the length of the ou<fim_middle>tesseract_->PrepareForTessOCR(block_list_, osd_tess, &osr)<fim_end>
nullptr, 0, nullptr, nullptr, false, &mgr) == 0) { osd_tess = osd_tesseract_; osd_tesseract_->set_source_resolution(thresholder_->GetSourceYResolution()); } else { tprintf( "Warning: Auto orientation and script detection requested," " but osd language failed to load\n"); delete osd_tesseract_; osd_tesseract_ = nullptr; } } } #endif // ndef DISABLED_LEGACY_ENGINE if (tesseract_->SegmentPage(input_file_.c_str(), block_list_, osd_tess, &osr) < 0) { return -1; } // If Devanagari is being recognized, we use different images for page seg // and for OCR.
tesseract_->PrepareForTessOCR(block_list_, osd_tess, &osr)
; return 0; } /** * Return average gradient of lines on page. */ float TessBaseAPI::GetGradient() { return tesseract_->gradient(); } /** Delete the pageres and clear the block list ready for a new page. */ void TessBaseAPI::ClearResults() { if (tesseract_ != nullptr) { tesseract_->Clear(); } delete page_res_; page_res_ = nullptr; recognition_done_ = false; if (block_list_ == nullptr) { block_list_ = new BLOCK_LIST; } else { block_list_->clear(); } if (paragraph_models_ != nullptr) { for (auto model : *paragraph_models_) { delete model; } delete paragraph_models_; paragraph_models_ = nullptr; } } /** * Return the length of the ou
ast_based
<fim_prefix> type = LLAMA_TOKEN_TYPE_CONTROL; } else if (text.empty()) { type = LLAMA_TOKEN_TYPE_CONTROL; } else if (sscanf(text.c_str(), "<0x%02hhX>", &byte_val) == 1) { // Text of byte tokens is already in the expected format. type = LLAMA_TOKEN_TYPE_BYTE; } else { type = LLAMA_TOKEN_TYPE_NORMAL; } text = llama_escape_whitespaces(text); vocab->id_to_token[id].text = text; vocab->id_to_token[id].score = score; vocab->id_to_token[id].type = type; vocab->token_to_id.emplace(text, id); } }<fim_suffix> size *= gg_weights->ne[dim]; } for (int ct = 0; ct < size; ++ct) { int64_t i0 = 0; int64_t i1 = 0; int64_t i2 = 0; int64_t i3 = 0; ggml_unravel_index(gg_weights, ct, &i0, &i1, &i2, &i3); ggml_set_f32_nd(gg_weights, i0, i1, i2, i3, karpathy_weights[ct]); } } static void save_as_llama_model( struct my_llama_vocab * vocab, struct my_llama_model * model, TransformerWeights* w, const char * filename ) { // convert AK weights into GG weights one by one. // w->token_embedding_table -> model->tok_embeddings // float* -> struct ggml_tensor<fim_middle>} static void convert_weights_ak_to_gg(struct ggml_tensor * gg_weights, const float * karpathy_weights) { int size = 1; for (int dim = 0; dim < ggml_n_dims(gg_weights); ++dim) {<fim_end>
type = LLAMA_TOKEN_TYPE_CONTROL; } else if (text.empty()) { type = LLAMA_TOKEN_TYPE_CONTROL; } else if (sscanf(text.c_str(), "<0x%02hhX>", &byte_val) == 1) { // Text of byte tokens is already in the expected format. type = LLAMA_TOKEN_TYPE_BYTE; } else { type = LLAMA_TOKEN_TYPE_NORMAL; } text = llama_escape_whitespaces(text); vocab->id_to_token[id].text = text; vocab->id_to_token[id].score = score; vocab->id_to_token[id].type = type; vocab->token_to_id.emplace(text, id); } }
} static void convert_weights_ak_to_gg(struct ggml_tensor * gg_weights, const float * karpathy_weights) { int size = 1; for (int dim = 0; dim < ggml_n_dims(gg_weights); ++dim) {
size *= gg_weights->ne[dim]; } for (int ct = 0; ct < size; ++ct) { int64_t i0 = 0; int64_t i1 = 0; int64_t i2 = 0; int64_t i3 = 0; ggml_unravel_index(gg_weights, ct, &i0, &i1, &i2, &i3); ggml_set_f32_nd(gg_weights, i0, i1, i2, i3, karpathy_weights[ct]); } } static void save_as_llama_model( struct my_llama_vocab * vocab, struct my_llama_model * model, TransformerWeights* w, const char * filename ) { // convert AK weights into GG weights one by one. // w->token_embedding_table -> model->tok_embeddings // float* -> struct ggml_tensor
random
<fim_prefix> if (vflip) { dst_rect.size.y = -dst_rect.size.y; } texture->draw_rect_region(ci, dst_rect, Rect2(Vector2(), texture->get_size()), Color(1, 1, 1), false); } break; } } void AnimatedSprite2D::set_sprite_frames(const Ref<SpriteFrames> &p_frames) { if (frames == p_frames) { return; } if (frames.is_valid()) { frames->disconnect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed)); } stop(); frames = p_frames; if (frames.is_valid()) { frames->connect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed)); List<StringName> al; frames->get_animation_list(&al); if (al.is_empty()) { set_animation(StringName()); <fim_suffix>; } else { if (!frames->has_animation(animation)) { set_animation(al.front()->get()); } if (!frames->has_animation(autoplay)) { autoplay = String(); } } } notify_property_list_changed(); queue_redraw(); update_configuration_warnings(); emit_signal("sprite_frames_changed"); } Ref<SpriteFrames> AnimatedSprite2D::get_sprite_frames() const { return frames; } void AnimatedSprite2D::set_frame(int p_frame) { set_frame_and_progress(p_frame, std::signbit(get_playing_speed()) ? 1.0 : 0.0); } int AnimatedSprite2D::get_frame() const { return frame; } void AnimatedSprite2D::set_frame_progress(real_t p_progress) { frame_progress = p_progress; } real_t AnimatedSprite2<fim_middle>autoplay = String()<fim_end>
if (vflip) { dst_rect.size.y = -dst_rect.size.y; } texture->draw_rect_region(ci, dst_rect, Rect2(Vector2(), texture->get_size()), Color(1, 1, 1), false); } break; } } void AnimatedSprite2D::set_sprite_frames(const Ref<SpriteFrames> &p_frames) { if (frames == p_frames) { return; } if (frames.is_valid()) { frames->disconnect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed)); } stop(); frames = p_frames; if (frames.is_valid()) { frames->connect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed)); List<StringName> al; frames->get_animation_list(&al); if (al.is_empty()) { set_animation(StringName());
autoplay = String()
; } else { if (!frames->has_animation(animation)) { set_animation(al.front()->get()); } if (!frames->has_animation(autoplay)) { autoplay = String(); } } } notify_property_list_changed(); queue_redraw(); update_configuration_warnings(); emit_signal("sprite_frames_changed"); } Ref<SpriteFrames> AnimatedSprite2D::get_sprite_frames() const { return frames; } void AnimatedSprite2D::set_frame(int p_frame) { set_frame_and_progress(p_frame, std::signbit(get_playing_speed()) ? 1.0 : 0.0); } int AnimatedSprite2D::get_frame() const { return frame; } void AnimatedSprite2D::set_frame_progress(real_t p_progress) { frame_progress = p_progress; } real_t AnimatedSprite2
ast_based
<fim_prefix> float penalty_present); // 0.0 = disabled /// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982 LLAMA_API struct llama_sampler * llama_sampler_init_dry( const struct llama_vocab * vocab, int32_t n_ctx_train, float dry_multiplier, float dry_base, int32_t dry_allowed_length, int32_t dry_penalty_last_n,<fim_suffix> LLAMA_API struct llama_sampler * llama_sampler_init_logit_bias( int32_t n_vocab, int32_t n_logit_bias, const llama_logit_bias * logit_bias); // this sampler is meant to be used for fill-in-the-middle infilling // it's supposed to be used after top_k + top_p sampling // // 1. if the sum of the EOG probs times the number of candidates is higher than the sum of the other probs -> pick EOG // 2. combine probs of tokens that have the same prefix // // example: // // - before: // "hel": 0.5 // "hell": 0.2 // "hello": 0.1 // "dummy": 0.1 //<fim_middle> const char ** seq_breakers, size_t num_breakers); <fim_end>
float penalty_present); // 0.0 = disabled /// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982 LLAMA_API struct llama_sampler * llama_sampler_init_dry( const struct llama_vocab * vocab, int32_t n_ctx_train, float dry_multiplier, float dry_base, int32_t dry_allowed_length, int32_t dry_penalty_last_n,
const char ** seq_breakers, size_t num_breakers);
LLAMA_API struct llama_sampler * llama_sampler_init_logit_bias( int32_t n_vocab, int32_t n_logit_bias, const llama_logit_bias * logit_bias); // this sampler is meant to be used for fill-in-the-middle infilling // it's supposed to be used after top_k + top_p sampling // // 1. if the sum of the EOG probs times the number of candidates is higher than the sum of the other probs -> pick EOG // 2. combine probs of tokens that have the same prefix // // example: // // - before: // "hel": 0.5 // "hell": 0.2 // "hello": 0.1 // "dummy": 0.1 //
random
<fim_prefix> for (auto const& output : run_outputs) { target_node_names.push_back(output.node()->name()); } TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph()); return impl()->session_->Run(run_options, feeds, output_tensor_names, target_node_names, outputs, run_metadata); } absl::Status ClientSession::Run( const RunOptions& run_options, const FeedType& inputs, const std::vector<Output>& fetch_outputs, const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs, RunMetadata* run_metadata, const thread::ThreadPoolOptions& threadpool_options) const { std::vector<std::pair<string, Tensor>> feeds; for (auto const& feed : inputs) { <fim_suffix>; feeds.emplace_back(feed.first.name(), feed.second.tensor); } std::vector<string> output_tensor_names; output_tensor_names.reserve(fetch_outputs.size()); for (auto const& output : fetch_outputs) { output_tensor_names.push_back(output.name()); } std::vector<string> target_node_names; target_node_names.reserve(run_outputs.size()); for (auto const& output : run_outputs) { target_node_names.push_back(output.node()->name()); } TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph()); return impl()->session_->Run(run_options, feeds, output_tensor_names, target_node_names, outputs, run_metadata, threadpool_option<fim_middle>TF_RETURN_IF_ERROR(feed.second.status)<fim_end>
for (auto const& output : run_outputs) { target_node_names.push_back(output.node()->name()); } TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph()); return impl()->session_->Run(run_options, feeds, output_tensor_names, target_node_names, outputs, run_metadata); } absl::Status ClientSession::Run( const RunOptions& run_options, const FeedType& inputs, const std::vector<Output>& fetch_outputs, const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs, RunMetadata* run_metadata, const thread::ThreadPoolOptions& threadpool_options) const { std::vector<std::pair<string, Tensor>> feeds; for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status)
; feeds.emplace_back(feed.first.name(), feed.second.tensor); } std::vector<string> output_tensor_names; output_tensor_names.reserve(fetch_outputs.size()); for (auto const& output : fetch_outputs) { output_tensor_names.push_back(output.name()); } std::vector<string> target_node_names; target_node_names.reserve(run_outputs.size()); for (auto const& output : run_outputs) { target_node_names.push_back(output.node()->name()); } TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph()); return impl()->session_->Run(run_options, feeds, output_tensor_names, target_node_names, outputs, run_metadata, threadpool_option
ast_based
<fim_prefix>o_string(line_num); tsv_str += "\t" + std::to_string(word_num); tsv_str += "\t" + std::to_string(left); tsv_str += "\t" + std::to_string(top); tsv_str += "\t" + std::to_string(right - left); tsv_str += "\t" + std::to_string(bottom - top); tsv_str += "\t" + std::to_string(res_it->Confidence(RIL_WORD)); tsv_str += "\t"; #if !defined(NDEBUG) // Increment counts if at end of block/paragraph/textline. if (res_it->IsAtFinalElement(RIL_TEXTLINE, RIL_WORD)) { lcnt++; } if (res_it->IsAtFinalElement(RIL_PARA, RIL_WORD)) { pcnt++; } if (res_it->IsAtFinalElement(RIL_BLOCK, RIL_WORD)) { bcnt++; } #endif do { tsv_str += <fim_suffix>.get(); res_it->Next(RIL_SYMBOL); } while (!res_it->Empty(RIL_BLOCK) && !res_it->IsAtBeginningOf(RIL_WORD)); tsv_str += "\n"; // end of row #if !defined(NDEBUG) wcnt++; #endif } return copy_string(tsv_str); } /** The 5 numbers output for each box (the usual 4 and a page number.) */ const int kNumbersPerBlob = 5; /** * The number of bytes taken by each number. Since we use int16_t for ICOORD, * assume only 5 digits max. */ const int kBytesPerNumber = 5; /** * Multiplier for max expected textlength assumes (kBytesPerNumber + space) * * kNumbersPerBlob plus the newline. Add to this the * original UTF8 characters, and one kMaxBytesPerLine for safety. */ const int k<fim_middle>std::unique_ptr<const char[]>(res_it->GetUTF8Text(RIL_SYMBOL))<fim_end>
o_string(line_num); tsv_str += "\t" + std::to_string(word_num); tsv_str += "\t" + std::to_string(left); tsv_str += "\t" + std::to_string(top); tsv_str += "\t" + std::to_string(right - left); tsv_str += "\t" + std::to_string(bottom - top); tsv_str += "\t" + std::to_string(res_it->Confidence(RIL_WORD)); tsv_str += "\t"; #if !defined(NDEBUG) // Increment counts if at end of block/paragraph/textline. if (res_it->IsAtFinalElement(RIL_TEXTLINE, RIL_WORD)) { lcnt++; } if (res_it->IsAtFinalElement(RIL_PARA, RIL_WORD)) { pcnt++; } if (res_it->IsAtFinalElement(RIL_BLOCK, RIL_WORD)) { bcnt++; } #endif do { tsv_str +=
std::unique_ptr<const char[]>(res_it->GetUTF8Text(RIL_SYMBOL))
.get(); res_it->Next(RIL_SYMBOL); } while (!res_it->Empty(RIL_BLOCK) && !res_it->IsAtBeginningOf(RIL_WORD)); tsv_str += "\n"; // end of row #if !defined(NDEBUG) wcnt++; #endif } return copy_string(tsv_str); } /** The 5 numbers output for each box (the usual 4 and a page number.) */ const int kNumbersPerBlob = 5; /** * The number of bytes taken by each number. Since we use int16_t for ICOORD, * assume only 5 digits max. */ const int kBytesPerNumber = 5; /** * Multiplier for max expected textlength assumes (kBytesPerNumber + space) * * kNumbersPerBlob plus the newline. Add to this the * original UTF8 characters, and one kMaxBytesPerLine for safety. */ const int k
ast_based
<fim_prefix>sion_reloading = false; bool embedded_in_editor = false; bool recovery_mode_hint = false; bool _print_header = true; static inline Engine *singleton = nullptr; String write_movie_path; String shader_cache_path; static constexpr int SERVER_SYNC_FRAME_COUNT_WARNING = 5; int server_syncs = 0; bool frame_server_synced = false; bool freeze_time_scale = false; public: static Engine *get_singleton(); virtual void set_physics_ticks_per_second(int p_ips); virtual int get_physics_ticks_per_second() const; virtual void set_max_physics_steps_per_frame(int p_max_physics_steps); virtual int get_max_physics_steps_per_frame() const; void set_physics_jitter_fix(double p_threshold); <fim_suffix> virtual void set_max_fps(int p_fps); virtual int get_max_fps() const; virtual void set_audio_output_latency(int p_msec); virtual int get_audio_output_latency() const; virtual double get_frames_per_second() const { return _fps; } uint64_t get_frames_drawn(); uint64_t get_physics_frames() const { return _physics_frames; } uint64_t get_process_frames() const { return _process_frames; } bool is_in_physics_frame() const { return _in_physics; } uint64_t get_frame_ticks() const { return _frame_ticks; } double get_process_step() const { return _process_step; } double get_physics_interpolation_fraction() const { return _physics_interpolation_fraction; } void set_time_scale(double <fim_middle>double get_physics_jitter_fix() const;<fim_end>
sion_reloading = false; bool embedded_in_editor = false; bool recovery_mode_hint = false; bool _print_header = true; static inline Engine *singleton = nullptr; String write_movie_path; String shader_cache_path; static constexpr int SERVER_SYNC_FRAME_COUNT_WARNING = 5; int server_syncs = 0; bool frame_server_synced = false; bool freeze_time_scale = false; public: static Engine *get_singleton(); virtual void set_physics_ticks_per_second(int p_ips); virtual int get_physics_ticks_per_second() const; virtual void set_max_physics_steps_per_frame(int p_max_physics_steps); virtual int get_max_physics_steps_per_frame() const; void set_physics_jitter_fix(double p_threshold);
double get_physics_jitter_fix() const;
virtual void set_max_fps(int p_fps); virtual int get_max_fps() const; virtual void set_audio_output_latency(int p_msec); virtual int get_audio_output_latency() const; virtual double get_frames_per_second() const { return _fps; } uint64_t get_frames_drawn(); uint64_t get_physics_frames() const { return _physics_frames; } uint64_t get_process_frames() const { return _process_frames; } bool is_in_physics_frame() const { return _in_physics; } uint64_t get_frame_ticks() const { return _frame_ticks; } double get_process_step() const { return _process_step; } double get_physics_interpolation_fraction() const { return _physics_interpolation_fraction; } void set_time_scale(double
ast_based
<fim_prefix> LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors<fim_suffix> LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors<fim_middle> LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors<fim_end>
LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors
random
<fim_prefix>track_key_pair.second, p_mode, p_auto ? Animation::HANDLE_SET_MODE_AUTO : Animation::HANDLE_SET_MODE_RESET); } AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton(); if (ape) { undo_redo->add_do_method(ape, "_animation_update_key_frame"); undo_redo->add_undo_method(ape, "_animation_update_key_frame"); } undo_redo->commit_action(); } void AnimationBezierTrackEdit::_clear_selection_for_anim(const Ref<Animation> &p_anim) { if (!(animation == p_anim) || !is_visible()) { return; } _clear_selection(); } void AnimationBezierTrackEdit::_select_at_anim(const Ref<Animation> &p_anim, int p_track, real_t p_pos, bool p_single) { if (!(animation == p_anim) || !is_visible()) <fim_suffix> int idx = animation->track_find_key(p_track, p_pos, Animation::FIND_MODE_APPROX); ERR_FAIL_COND(idx < 0); selection.insert(IntPair(p_track, idx)); emit_signal(SNAME("select_key"), idx, p_single, p_track); queue_redraw(); } void AnimationBezierTrackEdit::gui_input(const Ref<InputEvent> &p_event) { ERR_FAIL_COND(p_event.is_null()); if (panner->gui_input(p_event)) { accept_event(); return; } if (p_event->is_pressed()) { if (ED_IS_SHORTCUT("animation_editor/duplicate_selected_keys", p_event)) { if (!read_only) { duplicate_selected_keys(-1.0, false); } accept_event(); } if (ED_IS_SHORTCUT("animation_editor/cut_selected_keys", p_event)) { if (!read_only) { <fim_middle>{ return; }<fim_end>
track_key_pair.second, p_mode, p_auto ? Animation::HANDLE_SET_MODE_AUTO : Animation::HANDLE_SET_MODE_RESET); } AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton(); if (ape) { undo_redo->add_do_method(ape, "_animation_update_key_frame"); undo_redo->add_undo_method(ape, "_animation_update_key_frame"); } undo_redo->commit_action(); } void AnimationBezierTrackEdit::_clear_selection_for_anim(const Ref<Animation> &p_anim) { if (!(animation == p_anim) || !is_visible()) { return; } _clear_selection(); } void AnimationBezierTrackEdit::_select_at_anim(const Ref<Animation> &p_anim, int p_track, real_t p_pos, bool p_single) { if (!(animation == p_anim) || !is_visible())
{ return; }
int idx = animation->track_find_key(p_track, p_pos, Animation::FIND_MODE_APPROX); ERR_FAIL_COND(idx < 0); selection.insert(IntPair(p_track, idx)); emit_signal(SNAME("select_key"), idx, p_single, p_track); queue_redraw(); } void AnimationBezierTrackEdit::gui_input(const Ref<InputEvent> &p_event) { ERR_FAIL_COND(p_event.is_null()); if (panner->gui_input(p_event)) { accept_event(); return; } if (p_event->is_pressed()) { if (ED_IS_SHORTCUT("animation_editor/duplicate_selected_keys", p_event)) { if (!read_only) { duplicate_selected_keys(-1.0, false); } accept_event(); } if (ED_IS_SHORTCUT("animation_editor/cut_selected_keys", p_event)) { if (!read_only) {
ast_based
<fim_prefix> bool use_validation_layers = false; bool generate_spirv_debug_info = false; bool extra_gpu_memory_tracking = false; #if defined(DEBUG_ENABLED) || defined(DEV_ENABLED) bool accurate_breadcrumbs = false; #endif int32_t gpu_idx = -1; uint64_t _process_frames = 0; bool _in_physics = false; List<Singleton> singletons; HashMap<StringName, Object *> singleton_ptrs; bool editor_hint = false; bool project_manager_hint = false; bool extension_reloading = false; bool embedded_in_editor = false; bool recovery_mode_hint = false; bool _print_header = true; static inline Engine *singleton = nullptr; String write_movie_path; String shader_cache_path;<fim_suffix> bool frame_server_synced = false; bool freeze_time_scale = false; public: static Engine *get_singleton(); virtual void set_physics_ticks_per_second(int p_ips); virtual int get_physics_ticks_per_second() const; virtual void set_max_physics_steps_per_frame(int p_max_physics_steps); virtual int get_max_physics_steps_per_frame() const; void set_physics_jitter_fix(double p_threshold); double get_physics_jitter_fix() const; virtual void set_max_fps(int p_fps); virtual int get_max_fps() const; virtual void set_audio_output_latency(int p_msec); virtual int get_audio_output_latency() const; virtual double get_frames_per_second() const { return _fps; } <fim_middle> static constexpr int SERVER_SYNC_FRAME_COUNT_WARNING = 5; int server_syncs = 0;<fim_end>
bool use_validation_layers = false; bool generate_spirv_debug_info = false; bool extra_gpu_memory_tracking = false; #if defined(DEBUG_ENABLED) || defined(DEV_ENABLED) bool accurate_breadcrumbs = false; #endif int32_t gpu_idx = -1; uint64_t _process_frames = 0; bool _in_physics = false; List<Singleton> singletons; HashMap<StringName, Object *> singleton_ptrs; bool editor_hint = false; bool project_manager_hint = false; bool extension_reloading = false; bool embedded_in_editor = false; bool recovery_mode_hint = false; bool _print_header = true; static inline Engine *singleton = nullptr; String write_movie_path; String shader_cache_path;
static constexpr int SERVER_SYNC_FRAME_COUNT_WARNING = 5; int server_syncs = 0;
bool frame_server_synced = false; bool freeze_time_scale = false; public: static Engine *get_singleton(); virtual void set_physics_ticks_per_second(int p_ips); virtual int get_physics_ticks_per_second() const; virtual void set_max_physics_steps_per_frame(int p_max_physics_steps); virtual int get_max_physics_steps_per_frame() const; void set_physics_jitter_fix(double p_threshold); double get_physics_jitter_fix() const; virtual void set_max_fps(int p_fps); virtual int get_max_fps() const; virtual void set_audio_output_latency(int p_msec); virtual int get_audio_output_latency() const; virtual double get_frames_per_second() const { return _fps; }
random
<fim_prefix>HDR; } else { target_format = Image::FORMAT_ASTC_4x4; } } else if (p_format == Image::ASTCFormat::ASTC_FORMAT_8x8) { if (is_hdr) { target_format = Image::FORMAT_ASTC_8x8_HDR; } else { target_format = Image::FORMAT_ASTC_8x8; } block_x = 8; block_y = 8; } // Compress image data and (if required) mipmaps. const bool has_mipmaps = r_img->has_mipmaps(); int width = r_img->get_width(); int height = r_img->get_height(); int required_width = (width % block_x) != 0 ? width + (block_x - (width % block_x)) : width; int required_height = (height % block_y) != 0 ? height + (block_y - (height % block_y)) : height; if (width != required_width || height != required_height) <fim_suffix> height = required_height; } print_verbose(vformat("astcenc: Encoding image size %dx%d to format %s%s.", width, height, Image::get_format_name(target_format), has_mipmaps ? ", with mipmaps" : "")); // Initialize astcenc. const int64_t dest_size = Image::get_image_data_size(width, height, target_format, has_mipmaps); Vector<uint8_t> dest_data; dest_data.resize(dest_size); uint8_t *dest_write = dest_data.ptrw(); astcenc_config config; config.block_x = block_x; config.block_y = block_y; config.profile = profile; const float quality = ASTCENC_PRE_MEDIUM; astcenc_error status = astcenc_config_init(profile, block_x, block_y, 1, quality, 0, &config); ERR_FAIL_COND_MSG(status != <fim_middle>{ // Resize texture to fit block size. r_img->resize(required_width, required_height); width = required_width;<fim_end>
HDR; } else { target_format = Image::FORMAT_ASTC_4x4; } } else if (p_format == Image::ASTCFormat::ASTC_FORMAT_8x8) { if (is_hdr) { target_format = Image::FORMAT_ASTC_8x8_HDR; } else { target_format = Image::FORMAT_ASTC_8x8; } block_x = 8; block_y = 8; } // Compress image data and (if required) mipmaps. const bool has_mipmaps = r_img->has_mipmaps(); int width = r_img->get_width(); int height = r_img->get_height(); int required_width = (width % block_x) != 0 ? width + (block_x - (width % block_x)) : width; int required_height = (height % block_y) != 0 ? height + (block_y - (height % block_y)) : height; if (width != required_width || height != required_height)
{ // Resize texture to fit block size. r_img->resize(required_width, required_height); width = required_width;
height = required_height; } print_verbose(vformat("astcenc: Encoding image size %dx%d to format %s%s.", width, height, Image::get_format_name(target_format), has_mipmaps ? ", with mipmaps" : "")); // Initialize astcenc. const int64_t dest_size = Image::get_image_data_size(width, height, target_format, has_mipmaps); Vector<uint8_t> dest_data; dest_data.resize(dest_size); uint8_t *dest_write = dest_data.ptrw(); astcenc_config config; config.block_x = block_x; config.block_y = block_y; config.profile = profile; const float quality = ASTCENC_PRE_MEDIUM; astcenc_error status = astcenc_config_init(profile, block_x, block_y, 1, quality, 0, &config); ERR_FAIL_COND_MSG(status !=
ast_based
<fim_prefix> return shader_cache_path; } Engine *Engine::get_singleton() { return singleton; } bool Engine::notify_frame_server_synced() { frame_server_synced = true; return server_syncs > SERVER_SYNC_FRAME_COUNT_WARNING; } void Engine::set_freeze_time_scale(bool p_frozen) { freeze_time_scale = p_frozen; } void Engine::set_embedded_in_editor(bool p_enabled) { embedded_in_editor = p_enabled; } bool Engine::is_embedded_in_editor() const { return embedded_in_editor; } Engine::Engine() { singleton = this; } Engine::~Engine() { if (singleton == this) { singleton = nullptr; } } Engine::Singleton::Singleton(const StringName &p_name, Object *p_ptr, const StringName &p_class_name) :<fim_suffix>#ifdef DEBUG_ENABLED RefCounted *rc = Object::cast_to<RefCounted>(p_ptr); if (rc && !rc->is_referenced()) { WARN_PRINT("You must use Ref<> to ensure the lifetime of a RefCounted object intended to be used as a singleton."); } #endif } <fim_middle> name(p_name), ptr(p_ptr), class_name(p_class_name) {<fim_end>
return shader_cache_path; } Engine *Engine::get_singleton() { return singleton; } bool Engine::notify_frame_server_synced() { frame_server_synced = true; return server_syncs > SERVER_SYNC_FRAME_COUNT_WARNING; } void Engine::set_freeze_time_scale(bool p_frozen) { freeze_time_scale = p_frozen; } void Engine::set_embedded_in_editor(bool p_enabled) { embedded_in_editor = p_enabled; } bool Engine::is_embedded_in_editor() const { return embedded_in_editor; } Engine::Engine() { singleton = this; } Engine::~Engine() { if (singleton == this) { singleton = nullptr; } } Engine::Singleton::Singleton(const StringName &p_name, Object *p_ptr, const StringName &p_class_name) :
name(p_name), ptr(p_ptr), class_name(p_class_name) {
#ifdef DEBUG_ENABLED RefCounted *rc = Object::cast_to<RefCounted>(p_ptr); if (rc && !rc->is_referenced()) { WARN_PRINT("You must use Ref<> to ensure the lifetime of a RefCounted object intended to be used as a singleton."); } #endif }
random
<fim_prefix>_modes.end(); ++restore_itr, ++handle_itr) { const AnimMoveRestore &amr = *restore_itr; Array key = amr.key; undo_redo->add_undo_method(animation.ptr(), "track_insert_key", amr.track, amr.time, amr.key, 1); undo_redo->add_undo_method( this, "_bezier_track_insert_key_at_anim", animation, amr.track, amr.time, key[0], Vector2(key[1], key[2]), Vector2(key[3], key[4]), *handle_itr); } undo_redo->add_do_method(this, "_clear_selection_for_anim", animation); undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation); // 7 - Reselect. int i = 0; for (SelectionSet::Element *E = selection.back(); E; <fim_suffix>) { real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second); real_t newpos = animation->track_get_key_time(E->get().first, E->get().second); newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1); undo_redo->add_do_method(this, "_select_at_anim", animation, E->get().first, newpos, i == 0); undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, oldpos, i == 0); i++; } AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton(); if (ape) { undo_redo->add_do_method(ape, "_animation_update_key_frame"); undo_redo->add_undo_method(ape, "_animatio<fim_middle>E = E->prev()<fim_end>
_modes.end(); ++restore_itr, ++handle_itr) { const AnimMoveRestore &amr = *restore_itr; Array key = amr.key; undo_redo->add_undo_method(animation.ptr(), "track_insert_key", amr.track, amr.time, amr.key, 1); undo_redo->add_undo_method( this, "_bezier_track_insert_key_at_anim", animation, amr.track, amr.time, key[0], Vector2(key[1], key[2]), Vector2(key[3], key[4]), *handle_itr); } undo_redo->add_do_method(this, "_clear_selection_for_anim", animation); undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation); // 7 - Reselect. int i = 0; for (SelectionSet::Element *E = selection.back(); E;
E = E->prev()
) { real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second); real_t newpos = animation->track_get_key_time(E->get().first, E->get().second); newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1); undo_redo->add_do_method(this, "_select_at_anim", animation, E->get().first, newpos, i == 0); undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, oldpos, i == 0); i++; } AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton(); if (ape) { undo_redo->add_do_method(ape, "_animation_update_key_frame"); undo_redo->add_undo_method(ape, "_animatio
ast_based
<fim_prefix>CCESSIBILITY_UPDATE notification."); if (p_id.is_valid() && rid_owner.owns(p_id)) { focus = p_id; } else { focus = RID(); } } RID AccessibilityDriverAccessKit::accessibility_get_window_root(DisplayServer::WindowID p_window_id) const { const WindowData *wd = windows.getptr(p_window_id); ERR_FAIL_NULL_V(wd, RID()); return wd->root_id; } accesskit_tree_update *AccessibilityDriverAccessKit::_accessibility_build_tree_update(void *p_user_data) { DisplayServer::WindowID window_id = (DisplayServer::WindowID)(size_t)p_user_data; ERR_FAIL_COND_V(!singleton->windows.has(window_id), nullptr); WindowData &wd = singleton->windows[window_id]; singleton->in_accessibility_update = true; <fim_suffix> singleton->in_accessibility_update = false; AccessibilityElement *focus_ae = singleton->rid_owner.get_or_null(singleton->focus); uint32_t update_size = wd.update.size(); accesskit_node_id ac_focus = (accesskit_node_id)wd.root_id.get_id(); if (focus_ae && focus_ae->window_id == window_id) { ac_focus = (accesskit_node_id)singleton->focus.get_id(); } accesskit_tree_update *tree_update = (update_size > 0) ? accesskit_tree_update_with_capacity_and_focus(update_size, ac_focus) : accesskit_tree_update_with_focus(ac_focus); for (const RID &rid : wd.update) { AccessibilityElement *ae = singleton->rid_owner.get_or_null(rid); if (ae && ae->node) { for (const RID &child_rid : ae->ch<fim_middle>if (singleton->update_cb.is_valid()) { singleton->update_cb.call(window_id); }<fim_end>
CCESSIBILITY_UPDATE notification."); if (p_id.is_valid() && rid_owner.owns(p_id)) { focus = p_id; } else { focus = RID(); } } RID AccessibilityDriverAccessKit::accessibility_get_window_root(DisplayServer::WindowID p_window_id) const { const WindowData *wd = windows.getptr(p_window_id); ERR_FAIL_NULL_V(wd, RID()); return wd->root_id; } accesskit_tree_update *AccessibilityDriverAccessKit::_accessibility_build_tree_update(void *p_user_data) { DisplayServer::WindowID window_id = (DisplayServer::WindowID)(size_t)p_user_data; ERR_FAIL_COND_V(!singleton->windows.has(window_id), nullptr); WindowData &wd = singleton->windows[window_id]; singleton->in_accessibility_update = true;
if (singleton->update_cb.is_valid()) { singleton->update_cb.call(window_id); }
singleton->in_accessibility_update = false; AccessibilityElement *focus_ae = singleton->rid_owner.get_or_null(singleton->focus); uint32_t update_size = wd.update.size(); accesskit_node_id ac_focus = (accesskit_node_id)wd.root_id.get_id(); if (focus_ae && focus_ae->window_id == window_id) { ac_focus = (accesskit_node_id)singleton->focus.get_id(); } accesskit_tree_update *tree_update = (update_size > 0) ? accesskit_tree_update_with_capacity_and_focus(update_size, ac_focus) : accesskit_tree_update_with_focus(ac_focus); for (const RID &rid : wd.update) { AccessibilityElement *ae = singleton->rid_owner.get_or_null(rid); if (ae && ae->node) { for (const RID &child_rid : ae->ch
ast_based
<fim_prefix>y) { EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton(); undo_redo->create_action(TTR("Move Bezier Points")); if (moving_handle == -1) { real_t ratio = timeline->get_zoom_scale() * timeline_v_zoom; undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_in_handle", moving_handle_track, moving_handle_key, moving_handle_left, ratio); undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_in_handle", moving_handle_track, moving_handle_key, animation->bezier_track_get_key_in_handle(moving_handle_track, moving_handle_key), ratio); } else if (moving_handle == 1) { real_t ratio = timeline->get_zoom_scale() * timeline_v_zoom; <fim_suffix> undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_out_handle", moving_handle_track, moving_handle_key, animation->bezier_track_get_key_out_handle(moving_handle_track, moving_handle_key), ratio); } AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton(); if (ape) { undo_redo->add_do_method(ape, "_animation_update_key_frame"); undo_redo->add_undo_method(ape, "_animation_update_key_frame"); } undo_redo->commit_action(); moving_handle = 0; queue_redraw(); } } } bool AnimationBezierTrackEdit::_try_select_at_ui_pos(const Point2 &p_pos, bool p_aggregate, bool p_deselectable) { for (int i = 0; i < edit_points.size(); i++) { // Path<fim_middle>undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_out_handle", moving_handle_track, moving_handle_key, moving_handle_right, ratio);<fim_end>
y) { EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton(); undo_redo->create_action(TTR("Move Bezier Points")); if (moving_handle == -1) { real_t ratio = timeline->get_zoom_scale() * timeline_v_zoom; undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_in_handle", moving_handle_track, moving_handle_key, moving_handle_left, ratio); undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_in_handle", moving_handle_track, moving_handle_key, animation->bezier_track_get_key_in_handle(moving_handle_track, moving_handle_key), ratio); } else if (moving_handle == 1) { real_t ratio = timeline->get_zoom_scale() * timeline_v_zoom;
undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_out_handle", moving_handle_track, moving_handle_key, moving_handle_right, ratio);
undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_out_handle", moving_handle_track, moving_handle_key, animation->bezier_track_get_key_out_handle(moving_handle_track, moving_handle_key), ratio); } AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton(); if (ape) { undo_redo->add_do_method(ape, "_animation_update_key_frame"); undo_redo->add_undo_method(ape, "_animation_update_key_frame"); } undo_redo->commit_action(); moving_handle = 0; queue_redraw(); } } } bool AnimationBezierTrackEdit::_try_select_at_ui_pos(const Point2 &p_pos, bool p_aggregate, bool p_deselectable) { for (int i = 0; i < edit_points.size(); i++) { // Path
ast_based
<fim_prefix>_size(width, height, target_format, has_mipmaps); Vector<uint8_t> dest_data; dest_data.resize(dest_size); uint8_t *dest_write = dest_data.ptrw(); // Decompress image. const int mip_count = has_mipmaps ? Image::get_image_required_mipmaps(width, height, target_format) : 0; const uint8_t *src_data = r_img->ptr(); for (int i = 0; i < mip_count + 1; i++) { const int64_t src_ofs = Image::get_image_mipmap_offset(width, height, src_format, i); const uint8_t *mip_data = &src_data[src_ofs]; int64_t src_size; if (i == mip_count) { src_size = r_img->get_data_size() - src_ofs; } else { src_size = Image::get_image_mipmap_offset(width, height, src_format, i + 1) - src_ofs; } <fim_suffix> const int64_t dst_ofs = Image::get_image_mipmap_offset_and_dimensions(width, height, target_format, i, dst_mip_w, dst_mip_h); // Ensure that mip offset is a multiple of 8 (etcpak expects uint64_t pointer). ERR_FAIL_COND(dst_ofs % 8 != 0); uint8_t *dest_mip_write = &dest_write[dst_ofs]; astcenc_image image; image.dim_x = dst_mip_w; image.dim_y = dst_mip_h; image.dim_z = 1; image.data_type = is_hdr ? ASTCENC_TYPE_F16 : ASTCENC_TYPE_U8; image.data = (void **)(&dest_mip_write); const astcenc_swizzle swizzle = { ASTCENC_SWZ_R, ASTCENC_SWZ_G, ASTCENC_SWZ_B, ASTCENC_SWZ_A }; status = astcenc_decompress_image(context, mip_data, src_size, &image, &swizzle, 0); ERR<fim_middle>int dst_mip_w, dst_mip_h;<fim_end>
_size(width, height, target_format, has_mipmaps); Vector<uint8_t> dest_data; dest_data.resize(dest_size); uint8_t *dest_write = dest_data.ptrw(); // Decompress image. const int mip_count = has_mipmaps ? Image::get_image_required_mipmaps(width, height, target_format) : 0; const uint8_t *src_data = r_img->ptr(); for (int i = 0; i < mip_count + 1; i++) { const int64_t src_ofs = Image::get_image_mipmap_offset(width, height, src_format, i); const uint8_t *mip_data = &src_data[src_ofs]; int64_t src_size; if (i == mip_count) { src_size = r_img->get_data_size() - src_ofs; } else { src_size = Image::get_image_mipmap_offset(width, height, src_format, i + 1) - src_ofs; }
int dst_mip_w, dst_mip_h;
const int64_t dst_ofs = Image::get_image_mipmap_offset_and_dimensions(width, height, target_format, i, dst_mip_w, dst_mip_h); // Ensure that mip offset is a multiple of 8 (etcpak expects uint64_t pointer). ERR_FAIL_COND(dst_ofs % 8 != 0); uint8_t *dest_mip_write = &dest_write[dst_ofs]; astcenc_image image; image.dim_x = dst_mip_w; image.dim_y = dst_mip_h; image.dim_z = 1; image.data_type = is_hdr ? ASTCENC_TYPE_F16 : ASTCENC_TYPE_U8; image.data = (void **)(&dest_mip_write); const astcenc_swizzle swizzle = { ASTCENC_SWZ_R, ASTCENC_SWZ_G, ASTCENC_SWZ_B, ASTCENC_SWZ_A }; status = astcenc_decompress_image(context, mip_data, src_size, &image, &swizzle, 0); ERR
ast_based
<fim_prefix> if (!buf) { LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__); return false; } ggml_backend_buffer_clear(buf, 0); bufs.emplace_back(buf); } return true; } bool llama_adapter_cvec::apply( const llama_model & model, const float * data, size_t len, int32_t n_embd, int32_t il_start, int32_t il_end) { const auto & hparams = model.hparams; if (data == nullptr) { // disable the current control vector (but leave allocated for later) layer_start = -1; layer_end = -1;<fim_suffix> } if (n_embd != (int) hparams.n_embd) { LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__); return false; } if (tensors.empty()) { if (!init(model)) { return false; } } layer_start = il_start; layer_end = il_end; for (size_t il = 1; il < hparams.n_layer; il++) { assert(tensors[il] != nullptr); const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present if (off + n_embd <= len) { ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il])); } } return true;<fim_middle> return true;<fim_end>
if (!buf) { LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__); return false; } ggml_backend_buffer_clear(buf, 0); bufs.emplace_back(buf); } return true; } bool llama_adapter_cvec::apply( const llama_model & model, const float * data, size_t len, int32_t n_embd, int32_t il_start, int32_t il_end) { const auto & hparams = model.hparams; if (data == nullptr) { // disable the current control vector (but leave allocated for later) layer_start = -1; layer_end = -1;
return true;
} if (n_embd != (int) hparams.n_embd) { LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__); return false; } if (tensors.empty()) { if (!init(model)) { return false; } } layer_start = il_start; layer_end = il_end; for (size_t il = 1; il < hparams.n_layer; il++) { assert(tensors[il] != nullptr); const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present if (off + n_embd <= len) { ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il])); } } return true;
random
<fim_prefix> * WARNING! This class points to data held within the TessBaseAPI class, and * therefore can only be used while the TessBaseAPI class still exists and * has not been subjected to a call of Init, SetImage, Recognize, Clear, End * DetectOS, or anything else that changes the internal PAGE_RES. */ MutableIterator *TessBaseAPI::GetMutableIterator() { if (tesseract_ == nullptr || page_res_ == nullptr) { return nullptr; } return new MutableIterator(page_res_, tesseract_, thresholder_->GetScaleFactor(), thresholder_->GetScaledYResolution(), rect_left_, rect_top_, rect_width_, rect_height_); } <fim_suffix> } std::string text(""); const std::unique_ptr</*non-const*/ ResultIterator> it(GetIterator()); do { if (it->Empty(RIL_PARA)) { continue; } auto block_type = it->BlockType(); switch (block_type) { case PT_FLOWING_IMAGE: case PT_HEADING_IMAGE: case PT_PULLOUT_IMAGE: case PT_HORZ_LINE: case PT_VERT_LINE: // Ignore images and lines for text output. continue; case PT_NOISE: tprintf("TODO: Please report image which triggers the noise case.\n"); ASSERT_HOST(false); default: break; } const std::unique_ptr<const char[]> para_text(it->GetUTF8Text(RIL_PARA));<fim_middle>/** Make a text string from the internal data structures. */ char *TessBaseAPI::GetUTF8Text() { if (tesseract_ == nullptr || (!recognition_done_ && Recognize(nullptr) < 0)) { return nullptr;<fim_end>
* WARNING! This class points to data held within the TessBaseAPI class, and * therefore can only be used while the TessBaseAPI class still exists and * has not been subjected to a call of Init, SetImage, Recognize, Clear, End * DetectOS, or anything else that changes the internal PAGE_RES. */ MutableIterator *TessBaseAPI::GetMutableIterator() { if (tesseract_ == nullptr || page_res_ == nullptr) { return nullptr; } return new MutableIterator(page_res_, tesseract_, thresholder_->GetScaleFactor(), thresholder_->GetScaledYResolution(), rect_left_, rect_top_, rect_width_, rect_height_); }
/** Make a text string from the internal data structures. */ char *TessBaseAPI::GetUTF8Text() { if (tesseract_ == nullptr || (!recognition_done_ && Recognize(nullptr) < 0)) { return nullptr;
} std::string text(""); const std::unique_ptr</*non-const*/ ResultIterator> it(GetIterator()); do { if (it->Empty(RIL_PARA)) { continue; } auto block_type = it->BlockType(); switch (block_type) { case PT_FLOWING_IMAGE: case PT_HEADING_IMAGE: case PT_PULLOUT_IMAGE: case PT_HORZ_LINE: case PT_VERT_LINE: // Ignore images and lines for text output. continue; case PT_NOISE: tprintf("TODO: Please report image which triggers the noise case.\n"); ASSERT_HOST(false); default: break; } const std::unique_ptr<const char[]> para_text(it->GetUTF8Text(RIL_PARA));
random
<fim_prefix> r_options->push_back(String(name).quote()); } } } Node2D::get_argument_options(p_function, p_idx, r_options); } #endif // TOOLS_ENABLED #ifndef DISABLE_DEPRECATED bool AnimatedSprite2D::_set(const StringName &p_name, const Variant &p_value) { if ((p_name == SNAME("frames"))) { set_sprite_frames(p_value); return true; } return false; } #endif void AnimatedSprite2D::_bind_methods() { ClassDB::bind_method(D_METHOD("set_sprite_frames", "sprite_frames"), &AnimatedSprite2D::set_sprite_frames); ClassDB::bind_method(D_METHOD("get_sprite_frames"), &AnimatedSprite2D::get_sprite_frames); ClassDB::bind_method(D_METHOD("set_animation", "name"), &AnimatedSprite2D::set_animation); <fim_suffix>; ClassDB::bind_method(D_METHOD("set_autoplay", "name"), &AnimatedSprite2D::set_autoplay); ClassDB::bind_method(D_METHOD("get_autoplay"), &AnimatedSprite2D::get_autoplay); ClassDB::bind_method(D_METHOD("is_playing"), &AnimatedSprite2D::is_playing); ClassDB::bind_method(D_METHOD("play", "name", "custom_speed", "from_end"), &AnimatedSprite2D::play, DEFVAL(StringName()), DEFVAL(1.0), DEFVAL(false)); ClassDB::bind_method(D_METHOD("play_backwards", "name"), &AnimatedSprite2D::play_backwards, DEFVAL(StringName())); ClassDB::bind_method(D_METHOD("pause"), &AnimatedSprite2D::pause); ClassDB::bind_method(D_METHOD("stop"), &AnimatedSprite2D::stop); ClassDB::bind_method(D_METHOD("set_center<fim_middle>ClassDB::bind_method(D_METHOD("get_animation"), &AnimatedSprite2D::get_animation)<fim_end>
r_options->push_back(String(name).quote()); } } } Node2D::get_argument_options(p_function, p_idx, r_options); } #endif // TOOLS_ENABLED #ifndef DISABLE_DEPRECATED bool AnimatedSprite2D::_set(const StringName &p_name, const Variant &p_value) { if ((p_name == SNAME("frames"))) { set_sprite_frames(p_value); return true; } return false; } #endif void AnimatedSprite2D::_bind_methods() { ClassDB::bind_method(D_METHOD("set_sprite_frames", "sprite_frames"), &AnimatedSprite2D::set_sprite_frames); ClassDB::bind_method(D_METHOD("get_sprite_frames"), &AnimatedSprite2D::get_sprite_frames); ClassDB::bind_method(D_METHOD("set_animation", "name"), &AnimatedSprite2D::set_animation);
ClassDB::bind_method(D_METHOD("get_animation"), &AnimatedSprite2D::get_animation)
; ClassDB::bind_method(D_METHOD("set_autoplay", "name"), &AnimatedSprite2D::set_autoplay); ClassDB::bind_method(D_METHOD("get_autoplay"), &AnimatedSprite2D::get_autoplay); ClassDB::bind_method(D_METHOD("is_playing"), &AnimatedSprite2D::is_playing); ClassDB::bind_method(D_METHOD("play", "name", "custom_speed", "from_end"), &AnimatedSprite2D::play, DEFVAL(StringName()), DEFVAL(1.0), DEFVAL(false)); ClassDB::bind_method(D_METHOD("play_backwards", "name"), &AnimatedSprite2D::play_backwards, DEFVAL(StringName())); ClassDB::bind_method(D_METHOD("pause"), &AnimatedSprite2D::pause); ClassDB::bind_method(D_METHOD("stop"), &AnimatedSprite2D::stop); ClassDB::bind_method(D_METHOD("set_center
ast_based
<fim_prefix> ERR_FAIL_NULL(ae); _ensure_node(p_id, ae); accesskit_node_set_foreground_color(ae->node, p_color.to_rgba32()); } Error AccessibilityDriverAccessKit::init() { #ifdef ACCESSKIT_DYNAMIC #ifdef DEBUG_ENABLED int dylibloader_verbose = 1; #else int dylibloader_verbose = 0; #endif void *library_handle = nullptr; String path; String arch = Engine::get_singleton()->get_architecture_name(); #ifdef LINUXBSD_ENABLED path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit." + arch + ".so"); if (!FileAccess::exists(path)) { path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../lib").path_join("libaccesskit." + arch + ".so"); }<fim_suffix> path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit.so"); } if (!FileAccess::exists(path)) { path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../lib").path_join("libaccesskit.so"); } if (!FileAccess::exists(path)) { return ERR_CANT_CREATE; } #endif #ifdef MACOS_ENABLED path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit." + arch + ".dylib"); if (!FileAccess::exists(path)) { path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../Frameworks").path_join("libaccesskit." + arch + ".dylib"); } if (!FileAccess::exists(path)) {<fim_middle> if (!FileAccess::exists(path)) {<fim_end>
ERR_FAIL_NULL(ae); _ensure_node(p_id, ae); accesskit_node_set_foreground_color(ae->node, p_color.to_rgba32()); } Error AccessibilityDriverAccessKit::init() { #ifdef ACCESSKIT_DYNAMIC #ifdef DEBUG_ENABLED int dylibloader_verbose = 1; #else int dylibloader_verbose = 0; #endif void *library_handle = nullptr; String path; String arch = Engine::get_singleton()->get_architecture_name(); #ifdef LINUXBSD_ENABLED path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit." + arch + ".so"); if (!FileAccess::exists(path)) { path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../lib").path_join("libaccesskit." + arch + ".so"); }
if (!FileAccess::exists(path)) {
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit.so"); } if (!FileAccess::exists(path)) { path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../lib").path_join("libaccesskit.so"); } if (!FileAccess::exists(path)) { return ERR_CANT_CREATE; } #endif #ifdef MACOS_ENABLED path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit." + arch + ".dylib"); if (!FileAccess::exists(path)) { path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../Frameworks").path_join("libaccesskit." + arch + ".dylib"); } if (!FileAccess::exists(path)) {
random
<fim_prefix>onst; Dictionary get_donor_info() const; Dictionary get_license_info() const; String get_license_text() const; void set_write_movie_path(const String &p_path); String get_write_movie_path() const; String get_architecture_name() const; void set_shader_cache_path(const String &p_path); String get_shader_cache_path() const; bool is_abort_on_gpu_errors_enabled() const; bool is_validation_layers_enabled() const; bool is_generate_spirv_debug_info_enabled() const; bool is_extra_gpu_memory_tracking_enabled() const; #if defined(DEBUG_ENABLED) || defined(DEV_ENABLED) bool is_accurate_breadcrumbs_enabled() const; #endif int32_t get_gpu_index() const; void increment_frames_drawn(); <fim_suffix> void set_freeze_time_scale(bool p_frozen); void set_embedded_in_editor(bool p_enabled); bool is_embedded_in_editor() const; Engine(); virtual ~Engine(); }; <fim_middle>bool notify_frame_server_synced();<fim_end>
onst; Dictionary get_donor_info() const; Dictionary get_license_info() const; String get_license_text() const; void set_write_movie_path(const String &p_path); String get_write_movie_path() const; String get_architecture_name() const; void set_shader_cache_path(const String &p_path); String get_shader_cache_path() const; bool is_abort_on_gpu_errors_enabled() const; bool is_validation_layers_enabled() const; bool is_generate_spirv_debug_info_enabled() const; bool is_extra_gpu_memory_tracking_enabled() const; #if defined(DEBUG_ENABLED) || defined(DEV_ENABLED) bool is_accurate_breadcrumbs_enabled() const; #endif int32_t get_gpu_index() const; void increment_frames_drawn();
bool notify_frame_server_synced();
void set_freeze_time_scale(bool p_frozen); void set_embedded_in_editor(bool p_enabled); bool is_embedded_in_editor() const; Engine(); virtual ~Engine(); };
ast_based
<fim_prefix>/ // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "src/core/credentials/call/call_credentials.h" #include <grpc/support/port_platform.h> #include <grpcpp/security/credentials.h> #include "absl/strings/str_cat.h" #include "src/core/util/grpc_check.h" namespace grpc { CallCredentials::CallCredentials(grpc_call_credentials* c_creds) : c_creds_(c_creds) { <fim_suffix> } CallCredentials::~CallCredentials() { grpc_call_credentials_release(c_creds_); } grpc::string CallCredentials::DebugString() { return absl::StrCat("CallCredentials{", c_creds_->debug_string(), "}"); } bool CallCredentials::ApplyToCall(grpc_call* call) { return grpc_call_set_credentials(call, c_creds_) == GRPC_CALL_OK; } } // namespace grpc <fim_middle>GRPC_CHECK_NE(c_creds, nullptr);<fim_end>
/ // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "src/core/credentials/call/call_credentials.h" #include <grpc/support/port_platform.h> #include <grpcpp/security/credentials.h> #include "absl/strings/str_cat.h" #include "src/core/util/grpc_check.h" namespace grpc { CallCredentials::CallCredentials(grpc_call_credentials* c_creds) : c_creds_(c_creds) {
GRPC_CHECK_NE(c_creds, nullptr);
} CallCredentials::~CallCredentials() { grpc_call_credentials_release(c_creds_); } grpc::string CallCredentials::DebugString() { return absl::StrCat("CallCredentials{", c_creds_->debug_string(), "}"); } bool CallCredentials::ApplyToCall(grpc_call* call) { return grpc_call_set_credentials(call, c_creds_) == GRPC_CALL_OK; } } // namespace grpc
ast_based
<fim_prefix>#include "dlhandle.h" #include <string> #ifndef _WIN32 # include <dlfcn.h> #else # include <cassert> # include <sstream> # define WIN32_LEAN_AND_MEAN # ifndef NOMINMAX # define NOMINMAX # endif # include <windows.h> #endif using namespace std::string_literals; namespace fs = std::filesystem; #ifndef _WIN32 Dlhandle::Dlhandle(const fs::path &fpath) { chandle = dlopen(fpath.c_str(), RTLD_LAZY | RTLD_LOCAL); if (!chandle) { throw Exception("dlopen: "s + dlerror()); } } Dlhandle::~Dlhandle() { if (chandle) <fim_suffix> } void *Dlhandle::get_internal(const char *symbol) const { return dlsym(chandle, symbol); } #else // defined(_WIN32) Dlhandle::Dlhandle(const fs::path &fpath) { fs::path afpath = fs::absolute(fpath); // Suppress the "Entry Point Not Found" dialog, caused by outdated nvcuda.dll from the GPU driver UINT lastErrorMode = GetErrorMode(); SetErrorMode(lastErrorMode | SEM_FAILCRITICALERRORS); chandle = LoadLibraryExW(afpath.c_str(), NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS | LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR); SetErrorMode(lastErrorMode); if (!chandle) { DWORD err = GetLastError(); std::ostringstream ss; ss << "LoadLibraryExW failed with <fim_middle>dlclose(chandle);<fim_end>
#include "dlhandle.h" #include <string> #ifndef _WIN32 # include <dlfcn.h> #else # include <cassert> # include <sstream> # define WIN32_LEAN_AND_MEAN # ifndef NOMINMAX # define NOMINMAX # endif # include <windows.h> #endif using namespace std::string_literals; namespace fs = std::filesystem; #ifndef _WIN32 Dlhandle::Dlhandle(const fs::path &fpath) { chandle = dlopen(fpath.c_str(), RTLD_LAZY | RTLD_LOCAL); if (!chandle) { throw Exception("dlopen: "s + dlerror()); } } Dlhandle::~Dlhandle() { if (chandle)
dlclose(chandle);
} void *Dlhandle::get_internal(const char *symbol) const { return dlsym(chandle, symbol); } #else // defined(_WIN32) Dlhandle::Dlhandle(const fs::path &fpath) { fs::path afpath = fs::absolute(fpath); // Suppress the "Entry Point Not Found" dialog, caused by outdated nvcuda.dll from the GPU driver UINT lastErrorMode = GetErrorMode(); SetErrorMode(lastErrorMode | SEM_FAILCRITICALERRORS); chandle = LoadLibraryExW(afpath.c_str(), NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS | LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR); SetErrorMode(lastErrorMode); if (!chandle) { DWORD err = GetLastError(); std::ostringstream ss; ss << "LoadLibraryExW failed with
ast_based
<fim_prefix> undo_redo->add_do_method(animation.ptr(), "track_insert_key", E->get().first, dst_time, animation->track_get_key_value(E->get().first, E->get().second), animation->track_get_key_transition(E->get().first, E->get().second)); undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, dst_time); Pair<int, real_t> p; p.first = E->get().first; p.second = dst_time; new_selection_values.push_back(p); if (existing_idx != -1) { undo_redo->add_undo_method(animation.ptr(), "track_insert_key", E->get().first, dst_time, animation->track_get_key_value(E->get().first, existing_idx), animation->track_get_key_transition(E->get().first, existing_idx)); } } <fim_suffix>; undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation); // Reselect duplicated. int i = 0; for (const Pair<int, real_t> &E : new_selection_values) { undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0); i++; } i = 0; for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) { real_t time = animation->track_get_key_time(E->get().first, E->get().second); undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, time, i == 0); i++; } AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton(); if (ape) { undo_redo->add_do_method(ape, "_animation_update_key_frame"); undo_redo-><fim_middle>undo_redo->add_do_method(this, "_clear_selection_for_anim", animation)<fim_end>
undo_redo->add_do_method(animation.ptr(), "track_insert_key", E->get().first, dst_time, animation->track_get_key_value(E->get().first, E->get().second), animation->track_get_key_transition(E->get().first, E->get().second)); undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, dst_time); Pair<int, real_t> p; p.first = E->get().first; p.second = dst_time; new_selection_values.push_back(p); if (existing_idx != -1) { undo_redo->add_undo_method(animation.ptr(), "track_insert_key", E->get().first, dst_time, animation->track_get_key_value(E->get().first, existing_idx), animation->track_get_key_transition(E->get().first, existing_idx)); } }
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation)
; undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation); // Reselect duplicated. int i = 0; for (const Pair<int, real_t> &E : new_selection_values) { undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0); i++; } i = 0; for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) { real_t time = animation->track_get_key_time(E->get().first, E->get().second); undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, time, i == 0); i++; } AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton(); if (ape) { undo_redo->add_do_method(ape, "_animation_update_key_frame"); undo_redo->
ast_based
<fim_prefix> absl::Status Run(const FeedType& inputs, const std::vector<Output>& fetch_outputs, std::vector<Tensor>* outputs) const; /// Same as above. Additionally runs the operations ins `run_outputs`. absl::Status Run(const FeedType& inputs, const std::vector<Output>& fetch_outputs, const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs) const; /// Use `run_options` to turn on performance profiling. `run_metadata`, if not /// null, is filled in with the profiling results. absl::Status Run(const RunOptions& run_options, const FeedType& inputs,<fim_suffix> const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs, RunMetadata* run_metadata) const; /// Same as above. Additionally allows user to provide custom threadpool /// implementation via ThreadPoolOptions. absl::Status Run(const RunOptions& run_options, const FeedType& inputs, const std::vector<Output>& fetch_outputs, const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs, RunMetadata* run_metadata, const thread::ThreadPoolOptions& threadpool_options) const; /// \brief A handle to a subgraph, created with<fim_middle> const std::vector<Output>& fetch_outputs,<fim_end>
absl::Status Run(const FeedType& inputs, const std::vector<Output>& fetch_outputs, std::vector<Tensor>* outputs) const; /// Same as above. Additionally runs the operations ins `run_outputs`. absl::Status Run(const FeedType& inputs, const std::vector<Output>& fetch_outputs, const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs) const; /// Use `run_options` to turn on performance profiling. `run_metadata`, if not /// null, is filled in with the profiling results. absl::Status Run(const RunOptions& run_options, const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs, RunMetadata* run_metadata) const; /// Same as above. Additionally allows user to provide custom threadpool /// implementation via ThreadPoolOptions. absl::Status Run(const RunOptions& run_options, const FeedType& inputs, const std::vector<Output>& fetch_outputs, const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs, RunMetadata* run_metadata, const thread::ThreadPoolOptions& threadpool_options) const; /// \brief A handle to a subgraph, created with
random
<fim_prefix> " but data path is undefined\n"); delete osd_tesseract_; osd_tesseract_ = nullptr; } else if (osd_tesseract_->init_tesseract(datapath_, "", "osd", OEM_TESSERACT_ONLY, nullptr, 0, nullptr, nullptr, false, &mgr) == 0) { osd_tess = osd_tesseract_; osd_tesseract_->set_source_resolution(thresholder_->GetSourceYResolution()); } else { tprintf( "Warning: Auto orientation and script detection requested," " but osd language failed to load\n"); delete osd_tesseract_; osd_tesseract_ = nullptr; } } } #endif // ndef DISABLED_LEGACY_ENGINE <fim_suffix> // If Devanagari is being recognized, we use different images for page seg // and for OCR. tesseract_->PrepareForTessOCR(block_list_, osd_tess, &osr); return 0; } /** * Return average gradient of lines on page. */ float TessBaseAPI::GetGradient() { return tesseract_->gradient(); } /** Delete the pageres and clear the block list ready for a new page. */ void TessBaseAPI::ClearResults() { if (tesseract_ != nullptr) { tesseract_->Clear(); } delete page_res_; page_res_ = nullptr; recognition_done_ = false; if (block_list_ == nullptr) { block_list_ = new BLOCK_LIST; } else { block_list_->clear(); } if (paragraph_models_ != nullptr) {<fim_middle> if (tesseract_->SegmentPage(input_file_.c_str(), block_list_, osd_tess, &osr) < 0) { return -1; }<fim_end>
" but data path is undefined\n"); delete osd_tesseract_; osd_tesseract_ = nullptr; } else if (osd_tesseract_->init_tesseract(datapath_, "", "osd", OEM_TESSERACT_ONLY, nullptr, 0, nullptr, nullptr, false, &mgr) == 0) { osd_tess = osd_tesseract_; osd_tesseract_->set_source_resolution(thresholder_->GetSourceYResolution()); } else { tprintf( "Warning: Auto orientation and script detection requested," " but osd language failed to load\n"); delete osd_tesseract_; osd_tesseract_ = nullptr; } } } #endif // ndef DISABLED_LEGACY_ENGINE
if (tesseract_->SegmentPage(input_file_.c_str(), block_list_, osd_tess, &osr) < 0) { return -1; }
// If Devanagari is being recognized, we use different images for page seg // and for OCR. tesseract_->PrepareForTessOCR(block_list_, osd_tess, &osr); return 0; } /** * Return average gradient of lines on page. */ float TessBaseAPI::GetGradient() { return tesseract_->gradient(); } /** Delete the pageres and clear the block list ready for a new page. */ void TessBaseAPI::ClearResults() { if (tesseract_ != nullptr) { tesseract_->Clear(); } delete page_res_; page_res_ = nullptr; recognition_done_ = false; if (block_list_ == nullptr) { block_list_ = new BLOCK_LIST; } else { block_list_->clear(); } if (paragraph_models_ != nullptr) {
random
<fim_prefix> llama_model_params model_params = common_model_params_to_llama(params); llama_model * model = llama_model_load_from_file(params.model.path.c_str(), model_params); if (model == NULL) { LOG_ERR("%s: error: unable to load model\n" , __func__); return 1; } const llama_vocab * vocab = llama_model_get_vocab(model); // tokenize the prompt std::vector<llama_token> tokens_list; tokens_list = common_tokenize(vocab, params.prompt, true); const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel; // initialize the context llama_context_params ctx_params = common_context_params_to_llama(params);<fim_suffix> llama_context * ctx = llama_init_from_model(model, ctx_params); auto sparams = llama_sampler_chain_default_params(); sparams.no_perf = false; llama_sampler * smpl = llama_sampler_chain_init(sparams); llama_sampler_chain_add(smpl, llama_sampler_init_top_k(params.sampling.top_k)); llama_sampler_chain_add(smpl, llama_sampler_init_top_p(params.sampling.top_p, params.sampling.min_keep)); llama_sampler_chain_add(smpl, llama_sampler_init_temp (params.sampling.temp)); llama_sampler_chain_add(smpl, llama_sampler_init_dist (params.sampling.seed)); if (ctx == NULL) { LOG_ERR("%s: error: failed to create the llama_context\n" , __func__);<fim_middle> ctx_params.n_ctx = n_kv_req; ctx_params.n_batch = std::max(n_predict, n_parallel);<fim_end>
llama_model_params model_params = common_model_params_to_llama(params); llama_model * model = llama_model_load_from_file(params.model.path.c_str(), model_params); if (model == NULL) { LOG_ERR("%s: error: unable to load model\n" , __func__); return 1; } const llama_vocab * vocab = llama_model_get_vocab(model); // tokenize the prompt std::vector<llama_token> tokens_list; tokens_list = common_tokenize(vocab, params.prompt, true); const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel; // initialize the context llama_context_params ctx_params = common_context_params_to_llama(params);
ctx_params.n_ctx = n_kv_req; ctx_params.n_batch = std::max(n_predict, n_parallel);
llama_context * ctx = llama_init_from_model(model, ctx_params); auto sparams = llama_sampler_chain_default_params(); sparams.no_perf = false; llama_sampler * smpl = llama_sampler_chain_init(sparams); llama_sampler_chain_add(smpl, llama_sampler_init_top_k(params.sampling.top_k)); llama_sampler_chain_add(smpl, llama_sampler_init_top_p(params.sampling.top_p, params.sampling.min_keep)); llama_sampler_chain_add(smpl, llama_sampler_init_temp (params.sampling.temp)); llama_sampler_chain_add(smpl, llama_sampler_init_dist (params.sampling.seed)); if (ctx == NULL) { LOG_ERR("%s: error: failed to create the llama_context\n" , __func__);
random
<fim_prefix>lear_selection"), &AnimationBezierTrackEdit::_clear_selection); ClassDB::bind_method(D_METHOD("_clear_selection_for_anim"), &AnimationBezierTrackEdit::_clear_selection_for_anim); ClassDB::bind_method(D_METHOD("_select_at_anim"), &AnimationBezierTrackEdit::_select_at_anim); ClassDB::bind_method(D_METHOD("_update_hidden_tracks_after"), &AnimationBezierTrackEdit::_update_hidden_tracks_after); ClassDB::bind_method(D_METHOD("_update_locked_tracks_after"), &AnimationBezierTrackEdit::_update_locked_tracks_after); ClassDB::bind_method(D_METHOD("_bezier_track_insert_key_at_anim"), &AnimationBezierTrackEdit::_bezier_track_insert_key_at_anim, DEFVAL(Animation::HANDLE_SET_MODE_NONE)); ADD_SIGNAL(<fim_suffix>); ADD_SIGNAL(MethodInfo("deselect_key", PropertyInfo(Variant::INT, "index"), PropertyInfo(Variant::INT, "track"))); ADD_SIGNAL(MethodInfo("clear_selection")); ADD_SIGNAL(MethodInfo("timeline_changed", PropertyInfo(Variant::FLOAT, "position"), PropertyInfo(Variant::BOOL, "timeline_only"))); } AnimationBezierTrackEdit::AnimationBezierTrackEdit() { panner.instantiate(); panner->set_callbacks(callable_mp(this, &AnimationBezierTrackEdit::_pan_callback), callable_mp(this, &AnimationBezierTrackEdit::_zoom_callback)); play_position = memnew(Control); play_position->set_mouse_filter(MOUSE_FILTER_PASS); add_child(play_position); play_position->set_anchors_and_offsets_preset(PRESET_FULL_REC<fim_middle>MethodInfo("select_key", PropertyInfo(Variant::INT, "index"), PropertyInfo(Variant::BOOL, "single"), PropertyInfo(Variant::INT, "track"))<fim_end>
lear_selection"), &AnimationBezierTrackEdit::_clear_selection); ClassDB::bind_method(D_METHOD("_clear_selection_for_anim"), &AnimationBezierTrackEdit::_clear_selection_for_anim); ClassDB::bind_method(D_METHOD("_select_at_anim"), &AnimationBezierTrackEdit::_select_at_anim); ClassDB::bind_method(D_METHOD("_update_hidden_tracks_after"), &AnimationBezierTrackEdit::_update_hidden_tracks_after); ClassDB::bind_method(D_METHOD("_update_locked_tracks_after"), &AnimationBezierTrackEdit::_update_locked_tracks_after); ClassDB::bind_method(D_METHOD("_bezier_track_insert_key_at_anim"), &AnimationBezierTrackEdit::_bezier_track_insert_key_at_anim, DEFVAL(Animation::HANDLE_SET_MODE_NONE)); ADD_SIGNAL(
MethodInfo("select_key", PropertyInfo(Variant::INT, "index"), PropertyInfo(Variant::BOOL, "single"), PropertyInfo(Variant::INT, "track"))
); ADD_SIGNAL(MethodInfo("deselect_key", PropertyInfo(Variant::INT, "index"), PropertyInfo(Variant::INT, "track"))); ADD_SIGNAL(MethodInfo("clear_selection")); ADD_SIGNAL(MethodInfo("timeline_changed", PropertyInfo(Variant::FLOAT, "position"), PropertyInfo(Variant::BOOL, "timeline_only"))); } AnimationBezierTrackEdit::AnimationBezierTrackEdit() { panner.instantiate(); panner->set_callbacks(callable_mp(this, &AnimationBezierTrackEdit::_pan_callback), callable_mp(this, &AnimationBezierTrackEdit::_zoom_callback)); play_position = memnew(Control); play_position->set_mouse_filter(MOUSE_FILTER_PASS); add_child(play_position); play_position->set_anchors_and_offsets_preset(PRESET_FULL_REC
ast_based
<fim_prefix> offset_n += moving_selection_offset.x; height_n += moving_selection_offset.y; } else if (scaling_selection) { offset_n += -scaling_selection_offset.x + (offset_n - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1); height_n += -scaling_selection_offset.y + (height_n - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1); } } if (moving_inserted_key && moving_selection_from_track == p_track) { if (moving_selection_from_key == i) { Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(p_track, i); if (handle_mode != Animation::HANDLE_MODE_FREE) { float offset_p = offset; float height_p = height; <fim_suffix> animation->bezier_track_calculate_handles(offset, offset_p, height_p, offset_n, height_n, handle_mode, Animation::HANDLE_SET_MODE_AUTO, nullptr, &out_handle); } } else if (moving_selection_from_key == i_n) { Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(p_track, i_n); if (handle_mode != Animation::HANDLE_MODE_FREE) { float offset_nn = offset_n; float height_nn = height_n; if (E->next()->next()) { int i_nn = E->next()->next()->get(); offset_nn = animation->track_get_key_time(p_track, i_nn); height_nn = animation->bezier_track_get_key_value(p_track, i_nn); } animation->bezier_track_calculate_han<fim_middle>if (E->prev()) { int i_p = E->prev()->get(); offset_p = animation->track_get_key_time(p_track, i_p); height_p = animation->bezier_track_get_key_value(p_track, i_p); }<fim_end>
offset_n += moving_selection_offset.x; height_n += moving_selection_offset.y; } else if (scaling_selection) { offset_n += -scaling_selection_offset.x + (offset_n - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1); height_n += -scaling_selection_offset.y + (height_n - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1); } } if (moving_inserted_key && moving_selection_from_track == p_track) { if (moving_selection_from_key == i) { Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(p_track, i); if (handle_mode != Animation::HANDLE_MODE_FREE) { float offset_p = offset; float height_p = height;
if (E->prev()) { int i_p = E->prev()->get(); offset_p = animation->track_get_key_time(p_track, i_p); height_p = animation->bezier_track_get_key_value(p_track, i_p); }
animation->bezier_track_calculate_handles(offset, offset_p, height_p, offset_n, height_n, handle_mode, Animation::HANDLE_SET_MODE_AUTO, nullptr, &out_handle); } } else if (moving_selection_from_key == i_n) { Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(p_track, i_n); if (handle_mode != Animation::HANDLE_MODE_FREE) { float offset_nn = offset_n; float height_nn = height_n; if (E->next()->next()) { int i_nn = E->next()->next()->get(); offset_nn = animation->track_get_key_time(p_track, i_nn); height_nn = animation->bezier_track_get_key_value(p_track, i_nn); } animation->bezier_track_calculate_han
ast_based
<fim_prefix> LOG_INF("%s: Allocating [%d] x [%d] = [%d] float space for w->wcls\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim); } } catch (std::length_error &) { die("Invalid configuration. Failed to allocate memory for weights"); } } static int checkpoint_init_weights(TransformerWeights * w, const Config * p, FILE * f, bool shared_weights) { if (fread(w->token_embedding_table.data(), sizeof(float), w->token_embedding_table.size(), f) != w->token_embedding_table.size()) return 1; if (fread(w->rms_att_weight.data(), sizeof(float), w->rms_att_weight.size(), f) != w->rms_att_weight.size()) return 1;<fim_suffix> if (fread(w->wv.data(), sizeof(float), w->wv.size(), f) != w->wv.size()) return 1; if (fread(w->wo.data(), sizeof(float), w->wo.size(), f) != w->wo.size()) return 1; if (fread(w->rms_ffn_weight.data(), sizeof(float), w->rms_ffn_weight.size(), f) != w->rms_ffn_weight.size()) return 1; if (fread(w->w1.data(), sizeof(float), w->w1.size(), f) != w->w1.size()) return 1; if (fread(w->w2.data(), sizeof(float), w->w2.size(), f) != w->w2.size()) return 1; if (fread(w->w3.data(), sizeof(float), w->w3.size(), f) != w->w3.size()) return 1; if (fread(w->rms_final_weight.data(), sizeof(float), w->rms_final_weight.size(), f) != w->rms_final_weight.size()) return 1; <fim_middle> if (fread(w->wq.data(), sizeof(float), w->wq.size(), f) != w->wq.size()) return 1; if (fread(w->wk.data(), sizeof(float), w->wk.size(), f) != w->wk.size()) return 1;<fim_end>
LOG_INF("%s: Allocating [%d] x [%d] = [%d] float space for w->wcls\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim); } } catch (std::length_error &) { die("Invalid configuration. Failed to allocate memory for weights"); } } static int checkpoint_init_weights(TransformerWeights * w, const Config * p, FILE * f, bool shared_weights) { if (fread(w->token_embedding_table.data(), sizeof(float), w->token_embedding_table.size(), f) != w->token_embedding_table.size()) return 1; if (fread(w->rms_att_weight.data(), sizeof(float), w->rms_att_weight.size(), f) != w->rms_att_weight.size()) return 1;
if (fread(w->wq.data(), sizeof(float), w->wq.size(), f) != w->wq.size()) return 1; if (fread(w->wk.data(), sizeof(float), w->wk.size(), f) != w->wk.size()) return 1;
if (fread(w->wv.data(), sizeof(float), w->wv.size(), f) != w->wv.size()) return 1; if (fread(w->wo.data(), sizeof(float), w->wo.size(), f) != w->wo.size()) return 1; if (fread(w->rms_ffn_weight.data(), sizeof(float), w->rms_ffn_weight.size(), f) != w->rms_ffn_weight.size()) return 1; if (fread(w->w1.data(), sizeof(float), w->w1.size(), f) != w->w1.size()) return 1; if (fread(w->w2.data(), sizeof(float), w->w2.size(), f) != w->w2.size()) return 1; if (fread(w->w3.data(), sizeof(float), w->w3.size(), f) != w->w3.size()) return 1; if (fread(w->rms_final_weight.data(), sizeof(float), w->rms_final_weight.size(), f) != w->rms_final_weight.size()) return 1;
random
<fim_prefix> accesskit_node_set_bold(ae->node); } if (TS->font_get_style(font_rid).has_flag(TextServer::FONT_ITALIC)) { accesskit_node_set_italic(ae->node); } accesskit_node_set_font_weight(ae->node, TS->font_get_weight(font_rid)); } accesskit_node_set_font_size(ae->node, TS->shaped_get_run_font_size(p_shaped_text, i)); CharString language = TS->shaped_get_run_language(p_shaped_text, i).utf8(); if (language.length() > 0) { accesskit_node_set_language(ae->node, language.ptr()); } accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT); accesskit_rect rect; rect.x0 = run_off_x; rect.y0 = 0;<fim_suffix> accesskit_node_add_action(ae->node, ACCESSKIT_ACTION_SCROLL_INTO_VIEW); run_off_x += size_x; } { // Add "\n" at the end. AccessibilityElement *ae = memnew(AccessibilityElement); ae->role = ACCESSKIT_ROLE_TEXT_RUN; ae->window_id = parent_ae->window_id; ae->parent = root_rid; ae->run = Vector3i(full_range.y, full_range.y, run_count); ae->node = accesskit_node_new(ae->role); text_elements.push_back(ae); Vector<uint8_t> char_lengths; char_lengths.push_back(1); accesskit_node_set_value(ae->node, "\n"); accesskit_node_set_character_lengths(ae->node, char_lengths.size(), char_lengths.ptr()); Vector<float> char_positions; Vector<float> char_widths;<fim_middle> rect.x1 = run_off_x + size_x; rect.y1 = text_height; accesskit_node_set_bounds(ae->node, rect);<fim_end>
accesskit_node_set_bold(ae->node); } if (TS->font_get_style(font_rid).has_flag(TextServer::FONT_ITALIC)) { accesskit_node_set_italic(ae->node); } accesskit_node_set_font_weight(ae->node, TS->font_get_weight(font_rid)); } accesskit_node_set_font_size(ae->node, TS->shaped_get_run_font_size(p_shaped_text, i)); CharString language = TS->shaped_get_run_language(p_shaped_text, i).utf8(); if (language.length() > 0) { accesskit_node_set_language(ae->node, language.ptr()); } accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT); accesskit_rect rect; rect.x0 = run_off_x; rect.y0 = 0;
rect.x1 = run_off_x + size_x; rect.y1 = text_height; accesskit_node_set_bounds(ae->node, rect);
accesskit_node_add_action(ae->node, ACCESSKIT_ACTION_SCROLL_INTO_VIEW); run_off_x += size_x; } { // Add "\n" at the end. AccessibilityElement *ae = memnew(AccessibilityElement); ae->role = ACCESSKIT_ROLE_TEXT_RUN; ae->window_id = parent_ae->window_id; ae->parent = root_rid; ae->run = Vector3i(full_range.y, full_range.y, run_count); ae->node = accesskit_node_new(ae->role); text_elements.push_back(ae); Vector<uint8_t> char_lengths; char_lengths.push_back(1); accesskit_node_set_value(ae->node, "\n"); accesskit_node_set_character_lengths(ae->node, char_lengths.size(), char_lengths.ptr()); Vector<float> char_positions; Vector<float> char_widths;
random
<fim_prefix> grpc_channel_destroy(c_channel_); CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_relaxed); if (callback_cq != nullptr) { if (grpc_iomgr_run_in_background()) { // gRPC-core provides the backing needed for the preferred CQ type callback_cq->Shutdown(); } else { CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq); } } } namespace { inline grpc_slice SliceFromArray(const char* arr, size_t len) { return grpc_slice_from_copied_buffer(arr, len); } std::string GetChannelInfoField(grpc_channel* channel, grpc_channel_info* channel_info, char*** channel_info_field) {<fim_suffix> if (value == nullptr) return ""; std::string result = value; gpr_free(value); return result; } } // namespace std::string Channel::GetLoadBalancingPolicyName() const { grpc_channel_info channel_info; return GetChannelInfoField(c_channel_, &channel_info, &channel_info.lb_policy_name); } std::string Channel::GetServiceConfigJSON() const { grpc_channel_info channel_info; return GetChannelInfoField(c_channel_, &channel_info, &channel_info.service_config_json); } namespace experimental { void ChannelResetConnectionBackoff(Channel* channel) { grpc_channel_reset_connect_backoff(channel->c_channel_); } <fim_middle> char* value = nullptr; memset(channel_info, 0, sizeof(*channel_info)); *channel_info_field = &value; grpc_channel_get_info(channel, channel_info);<fim_end>
grpc_channel_destroy(c_channel_); CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_relaxed); if (callback_cq != nullptr) { if (grpc_iomgr_run_in_background()) { // gRPC-core provides the backing needed for the preferred CQ type callback_cq->Shutdown(); } else { CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq); } } } namespace { inline grpc_slice SliceFromArray(const char* arr, size_t len) { return grpc_slice_from_copied_buffer(arr, len); } std::string GetChannelInfoField(grpc_channel* channel, grpc_channel_info* channel_info, char*** channel_info_field) {
char* value = nullptr; memset(channel_info, 0, sizeof(*channel_info)); *channel_info_field = &value; grpc_channel_get_info(channel, channel_info);
if (value == nullptr) return ""; std::string result = value; gpr_free(value); return result; } } // namespace std::string Channel::GetLoadBalancingPolicyName() const { grpc_channel_info channel_info; return GetChannelInfoField(c_channel_, &channel_info, &channel_info.lb_policy_name); } std::string Channel::GetServiceConfigJSON() const { grpc_channel_info channel_info; return GetChannelInfoField(c_channel_, &channel_info, &channel_info.service_config_json); } namespace experimental { void ChannelResetConnectionBackoff(Channel* channel) { grpc_channel_reset_connect_backoff(channel->c_channel_); }
random
<fim_prefix> =*/ n_tensors*ggml_tensor_overhead(), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; ggml_context * buft_ctx = ggml_init(params); if (!buft_ctx) { return nullptr; } ctx_map[buft] = buft_ctx; adapter.ctxs.emplace_back(buft_ctx); return buft_ctx; }; return it->second; }; // bundle lora_a and lora_b into pairs std::map<std::string, llama_adapter_lora_weight> ab_map; auto str_endswith = [](const std::string & str, const std::string & suffix) { return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), <fim_suffix>, suffix) == 0; }; for (ggml_tensor * cur = ggml_get_first_tensor(ctx.get()); cur; cur = ggml_get_next_tensor(ctx.get(), cur)) { std::string name(cur->name); if (str_endswith(name, ".lora_a")) { replace_all(name, ".lora_a", ""); if (ab_map.find(name) == ab_map.end()) { ab_map[name] = llama_adapter_lora_weight(cur, nullptr); } else { ab_map[name].a = cur; } } else if (str_endswith(name, ".lora_b")) { replace_all(name, ".lora_b", ""); if (ab_map.find(name) == ab_map.end()) { ab_map[name] = llama_adapter_lora_weight(nullptr, cur); }<fim_middle>suffix.size()<fim_end>
=*/ n_tensors*ggml_tensor_overhead(), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; ggml_context * buft_ctx = ggml_init(params); if (!buft_ctx) { return nullptr; } ctx_map[buft] = buft_ctx; adapter.ctxs.emplace_back(buft_ctx); return buft_ctx; }; return it->second; }; // bundle lora_a and lora_b into pairs std::map<std::string, llama_adapter_lora_weight> ab_map; auto str_endswith = [](const std::string & str, const std::string & suffix) { return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(),
suffix.size()
, suffix) == 0; }; for (ggml_tensor * cur = ggml_get_first_tensor(ctx.get()); cur; cur = ggml_get_next_tensor(ctx.get(), cur)) { std::string name(cur->name); if (str_endswith(name, ".lora_a")) { replace_all(name, ".lora_a", ""); if (ab_map.find(name) == ab_map.end()) { ab_map[name] = llama_adapter_lora_weight(cur, nullptr); } else { ab_map[name].a = cur; } } else if (str_endswith(name, ".lora_b")) { replace_all(name, ".lora_b", ""); if (ab_map.find(name) == ab_map.end()) { ab_map[name] = llama_adapter_lora_weight(nullptr, cur); }
ast_based
<fim_prefix>} /** * Get textlines and strips of image regions as a leptonica-style Boxa, Pixa * pair, in reading order. Enables downstream handling of non-rectangular * regions. * Can be called before or after Recognize. * If blockids is not nullptr, the block-id of each line is also returned as an * array of one element per line. delete [] after use. */ Boxa *TessBaseAPI::GetStrips(Pixa **pixa, int **blockids) { return GetComponentImages(RIL_TEXTLINE, false, pixa, blockids); } /** * Get the words as a leptonica-style * Boxa, Pixa pair, in reading order. * Can be called before or after Recognize. */<fim_suffix>} /** * Gets the individual connected (text) components (created * after pages segmentation step, but before recognition) * as a leptonica-style Boxa, Pixa pair, in reading order. * Can be called before or after Recognize. */ Boxa *TessBaseAPI::GetConnectedComponents(Pixa **pixa) { return GetComponentImages(RIL_SYMBOL, true, pixa, nullptr); } /** * Get the given level kind of components (block, textline, word etc.) as a * leptonica-style Boxa, Pixa pair, in reading order. * Can be called before or after Recognize. * If blockids is not nullptr, the block-id of each component is also returned * as an array of one element per component. delete [] after use.<fim_middle>Boxa *TessBaseAPI::GetWords(Pixa **pixa) { return GetComponentImages(RIL_WORD, true, pixa, nullptr);<fim_end>
} /** * Get textlines and strips of image regions as a leptonica-style Boxa, Pixa * pair, in reading order. Enables downstream handling of non-rectangular * regions. * Can be called before or after Recognize. * If blockids is not nullptr, the block-id of each line is also returned as an * array of one element per line. delete [] after use. */ Boxa *TessBaseAPI::GetStrips(Pixa **pixa, int **blockids) { return GetComponentImages(RIL_TEXTLINE, false, pixa, blockids); } /** * Get the words as a leptonica-style * Boxa, Pixa pair, in reading order. * Can be called before or after Recognize. */
Boxa *TessBaseAPI::GetWords(Pixa **pixa) { return GetComponentImages(RIL_WORD, true, pixa, nullptr);
} /** * Gets the individual connected (text) components (created * after pages segmentation step, but before recognition) * as a leptonica-style Boxa, Pixa pair, in reading order. * Can be called before or after Recognize. */ Boxa *TessBaseAPI::GetConnectedComponents(Pixa **pixa) { return GetComponentImages(RIL_SYMBOL, true, pixa, nullptr); } /** * Get the given level kind of components (block, textline, word etc.) as a * leptonica-style Boxa, Pixa pair, in reading order. * Can be called before or after Recognize. * If blockids is not nullptr, the block-id of each component is also returned * as an array of one element per component. delete [] after use.
random
End of preview. Expand in Data Studio

fim-dataset-512

A Fill-in-the-Middle (FIM) dataset for code autocompletion.

Dataset Description

This dataset is designed for training code autocompletion models using the Fill-in-the-Middle (FIM) approach. The dataset contains code snippets formatted with FIM special tokens:

  • <fim_prefix>: Code before the completion point
  • <fim_suffix>: Code after the completion point
  • <fim_middle>: The code to be completed

Dataset Structure

Data Fields

  • text: The formatted FIM instruction containing prefix, suffix, and middle tokens

Data Splits

| Split      | Examples |
|------------|----------|
| train      | 33,140   |
| validation | 1,744    |

Usage

from datasets import load_dataset

# Load the dataset
dataset = load_dataset("KrzTyb/fim-dataset-512")

# Access train and validation splits
train_data = dataset["train"]
val_data = dataset["validation"]

# Example: Print first training example
print(train_data[0]["text"])

Training

This dataset can be used to fine-tune code language models for autocompletion:

from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer
from datasets import load_dataset

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-Coder-0.5B")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B")

# Add FIM tokens
fim_tokens = ["<fim_prefix>", "<fim_suffix>", "<fim_middle>", "<fim_pad>"]
tokenizer.add_special_tokens({"additional_special_tokens": fim_tokens})
model.resize_token_embeddings(len(tokenizer))

# Load dataset
dataset = load_dataset("KrzTyb/fim-dataset-512")

# Train (see full training script for details)
trainer = Trainer(
    model=model,
    train_dataset=dataset["train"],
    eval_dataset=dataset["validation"],
    # ... other training arguments
)
trainer.train()

Citation

If you use this dataset, please cite:

@dataset{KrzTyb_fim-dataset-512,
  title={fim-dataset-512},
  author={Dataset Creator},
  year={2025},
  publisher={Hugging Face},
  howpublished={\url{https://huggingface.co/datasets/KrzTyb/fim-dataset-512}}
}

License

Please specify the license for your dataset.

Downloads last month
18