@misc{Gyurjyan_Mikayel_K._Approach,
  author       = {Gyurjyan, Mikayel K. and Hayrapetyan, Andranik G.},
  title        = {Approach and Challenges of Training an Armenian Version of {BERT} Language Model},
  howpublished = {Online},
  publisher    = {Publishing House of the National Academy of Sciences of the Republic of Armenia},
  type         = {Article},
  keywords     = {Mathematical Sciences},
  abstract     = {Training and deploying BERT models for specific languages, especially low-resource ones, presents a unique set of challenges. These challenges stem from the data scarcity inherent to languages like Armenian, the computational demands of training BERT models, which often require extensive resources, and the inefficiencies of hosting and maintaining models for languages with limited digital traffic. In this research, we introduce a novel methodology that leverages the Armenian Wikipedia as a primary data source, aiming to optimize the performance of BERT for the Armenian language. Our approach demonstrates that, with strategic preprocessing and transfer learning techniques, it is possible to achieve performance metrics that rival those of models trained on more abundant datasets. Furthermore, we explore the potential of fine-tuning pre-trained multilingual BERT models, revealing that they can serve as robust starting points for training models for low-resource but significant languages such as Armenian.},
}