@article{3487,
  author   = {Varpe, Kanchan and Sakhare, Sachin},
  title    = {Reading {Modi} {Lipi}: A Deep Learning Journey in Character Recognition},
  abstract = {Advancements in deep learning methodologies have played a significant role in the success of various character recognition processes. Character recognition refers to the technique of identifying either handwritten or printed characters from documents and their conversion into a form that can be read by machines. MODI script, an ancient Indian script, is categorized under the Devanagari script and holds historical significance. Despite its historical importance, there are only a few MODI translators available. Conversely, there exist a vast number of historical documents written in MODI that are yet to be deciphered. Recognizing characters in Indian language scripts poses many challenges due to the complex nature of the scripts and variations in individuals' writing styles. This paper provides an overview of the newest advancements in the Handwritten Optical Character Recognition (HWCR) methodology specifically designed for the MODI script. Utilization of residual networks and inception in image classification has gained popularity in recent times. In this paper the authors have implemented three techniques: ResNet9, ResNet50, and InceptionNet V3, trained specifically for handwritten MODI characters and vowels. The dataset used for training the models consists of handwritten MODI script images. The benchmark database from IEEE data port for handwritten MODI script is used to evaluate the performance. The dataset contains 46 classes, including 10 vowel classes and 36 consonant classes. Each class comprises 90 images, resulting in a total of 4140 images. The image size in the dataset is 227{$\times$}227. The accuracy achieved by the trained models is as follows: 98.92\% for ResNet9, 91.91\% for ResNet50, and 86\% for Inception Net V3. The obtained results have been compared with existing models and it is observed that the proposed model attained improved performance parameters and less training and validation losses in comparison to existing methods. There are several advantages of the proposed model in comparison to state of the art, namely minimal training and validation loss. In addition to this, the proposed approach improved generalization and robustness, and improved model scalability.},
  keywords = {Deep Learning, Feature Extraction, Image Processing, InceptionNet V3, ResNet50, ResNet9},
  journal  = {International Journal of Interactive Multimedia and Artificial Intelligence},
  year     = {2024},
  month    = dec,
  volume   = {9},
  number   = {1},
  pages    = {75--83},
  issn     = {1989-1660},
  doi      = {10.9781/ijimai.2024.09.002},
  url      = {https://www.ijimai.org/journal/bibcite/reference/3487},
}