@article{2688,
  keywords = {Computer vision, Neural Network, Natural Language Processing, Stochastic Gradient Descent, Long Short-Term Memory (LSTM)},
  author = {Sudan Jha and Anirban Dey and Raghvendra Kumar and Vijender Kumar-Solanki},
  title = {A Novel Approach on Visual Question Answering by Parameter Prediction using Faster Region Based Convolutional Neural Network},
  abstract = {Visual Question Answering (VQA) is a stimulating problem at the intersection of Natural Language Processing (NLP) and Computer Vision (CV), in which a machine must answer a natural language question related to an image. Questions can be open-ended or multiple-choice. VQA datasets mainly contain three components: questions, images, and answers. Researchers have addressed the VQA problem with deep learning architectures that jointly combine two networks, a Convolutional Neural Network (CNN) for visual (image) representation and a Recurrent Neural Network (RNN) with Long Short-Term Memory (LSTM) for textual (question) representation, and train the combined network end to end to generate the answer. These models can answer common, simple questions that relate directly to the image's content, but different types of questions require different levels of understanding to produce correct answers. To address this, we use a faster Region-based CNN (R-CNN) to extract image features, with an extra fully connected layer whose weights are dynamically predicted by an LSTM cell according to the question. We claim that a single R-CNN architecture can solve VQA problems by modifying the weights in this parameter prediction layer. We trained the network end to end with Stochastic Gradient Descent (SGD), using a pretrained faster R-CNN and LSTM, and tested it on benchmark VQA datasets.},
  year = {2019},
  journal = {International Journal of Interactive Multimedia and Artificial Intelligence},
  volume = {5},
  number = {5},
  pages = {30--37},
  month = {06/2019},
  issn = {1989-1660},
  url = {https://www.ijimai.org/journal/sites/default/files/files/2018/08/ijimai_5_5_4_pdf_36854.pdf},
  doi = {10.9781/ijimai.2018.08.004},
}
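% The abstract's core technique, a fully connected layer whose weights are predicted
% from the question by an LSTM, can be sketched as below. This is a minimal
% illustration in PyTorch (an assumption; the paper specifies no framework), with
% hypothetical names and deliberately small dimensions, not the authors' implementation.

import torch
import torch.nn as nn

class DynamicParameterLayer(nn.Module):
    """Fully connected layer whose weight matrix is predicted per question."""
    def __init__(self, q_dim, in_dim, out_dim):
        super().__init__()
        self.in_dim, self.out_dim = in_dim, out_dim
        # Maps the LSTM's question embedding to a flattened weight matrix.
        # Predicting full matrices is parameter-heavy; small sizes are used
        # here purely for illustration.
        self.weight_predictor = nn.Linear(q_dim, in_dim * out_dim)

    def forward(self, img_feat, q_embed):
        # One weight matrix per question in the batch: (B, out_dim, in_dim).
        w = self.weight_predictor(q_embed).view(-1, self.out_dim, self.in_dim)
        # Apply the predicted weights to the image feature vector.
        return torch.bmm(w, img_feat.unsqueeze(2)).squeeze(2)

class VQASketch(nn.Module):
    def __init__(self, vocab_size, n_answers, img_dim=512, q_dim=256):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, 300)
        self.lstm = nn.LSTM(300, q_dim, batch_first=True)
        self.dynamic_fc = DynamicParameterLayer(q_dim, img_dim, 256)
        self.classifier = nn.Linear(256, n_answers)

    def forward(self, img_feat, question_tokens):
        # img_feat stands in for pooled faster R-CNN features, shape (B, img_dim).
        _, (h, _) = self.lstm(self.embed(question_tokens))
        fused = torch.relu(self.dynamic_fc(img_feat, h[-1]))
        return self.classifier(fused)

# End-to-end training with SGD, as the abstract describes, would then pair this
# model with torch.optim.SGD(model.parameters(), lr=...) and a cross-entropy loss.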