@article{oai:nagoya.repo.nii.ac.jp:00024885,
  author  = {Katahira, Kentaro and Yuki, Shoko and Okanoya, Kazuo},
  title   = {Model-based estimation of subjective values using choice tasks with probabilistic feedback},
  journal = {Journal of Mathematical Psychology},
  volume  = {79},
  pages   = {29--43},
  month   = {Aug},
  year    = {2017},
  note    = {Evaluating the subjective value of events is a crucial task in the investigation of how the brain implements the value-based computations by which living systems make decisions. This task is often not straightforward, especially for animal subjects. In the present paper, we propose a novel model-based method for estimating subjective value from choice behavior. The proposed method is based on reinforcement learning (RL) theory. It draws upon the premise that a subject tends to choose the option that leads to an outcome with a high subjective value. The proposed method consists of two components: (1) a novel behavioral task in which the choice outcome is presented randomly within the same valence category and (2) fitting the parameters of RL models to the behavioral data. We investigated the validity and limitations of the proposed method by conducting several computer simulations. We also applied the proposed method to actual behavioral data from two rats that performed two tasks: one manipulating the reward amount and another manipulating the delay of reward signals. These results demonstrate that reasonable estimates can be obtained using the proposed method.}
}
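
The abstract above describes estimating subjective values by fitting RL models to choice data. The sketch below illustrates that general approach only; it is not the authors' model or code. It assumes a two-option task, a Rescorla-Wagner value update, a softmax choice rule, and a single free subjective-value parameter, and all names (neg_log_likelihood, alpha, beta, v) are hypothetical placeholders. The synthetic data at the end is a usage example, not real behavioral data.

# Illustrative maximum-likelihood fit of a simple Q-learning model to
# two-option choice data, estimating a free "subjective value" parameter
# for one outcome category, with the other fixed to 1.0 as a reference.
# This is a sketch of the generic model-fitting approach, not the paper's
# actual model or implementation.
import numpy as np
from scipy.optimize import minimize

def neg_log_likelihood(params, choices, outcomes):
    # choices: array of chosen options, coded 0 or 1
    # outcomes: array of outcome categories, coded 0 or 1
    alpha, beta, v = params          # learning rate, inverse temperature, value
    values = np.array([1.0, v])      # subjective value of each outcome category
    q = np.zeros(2)                  # action values for the two options
    nll = 0.0
    for c, o in zip(choices, outcomes):
        p_choose = np.exp(beta * q) / np.sum(np.exp(beta * q))  # softmax rule
        nll -= np.log(p_choose[c] + 1e-12)
        q[c] += alpha * (values[o] - q[c])   # Rescorla-Wagner update
    return nll

# Usage example with synthetic placeholder data
rng = np.random.default_rng(0)
choices = rng.integers(0, 2, size=200)
outcomes = rng.integers(0, 2, size=200)
res = minimize(neg_log_likelihood, x0=[0.3, 2.0, 0.5],
               args=(choices, outcomes),
               bounds=[(0.01, 1.0), (0.1, 10.0), (0.0, 2.0)])
alpha_hat, beta_hat, v_hat = res.x   # v_hat is the recovered subjective value

The estimated v would be interpreted relative to the reference outcome, mirroring the paper's premise that choice frequencies reveal which outcome category carries the higher subjective value.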