{"created":"2021-03-01T06:32:52.530956+00:00","id":24885,"links":{},"metadata":{"_buckets":{"deposit":"cf9a0e75-99a8-4d5e-975d-0c1d04c3adc6"},"_deposit":{"id":"24885","owners":[],"pid":{"revision_id":0,"type":"depid","value":"24885"},"status":"published"},"_oai":{"id":"oai:nagoya.repo.nii.ac.jp:00024885","sets":["312:313:314"]},"author_link":["74032","74033","74034"],"item_10_biblio_info_6":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicIssueDates":{"bibliographicIssueDate":"2017-08","bibliographicIssueDateType":"Issued"},"bibliographicPageEnd":"43","bibliographicPageStart":"29","bibliographicVolumeNumber":"79","bibliographic_titles":[{"bibliographic_title":"Journal of Mathematical Psychology","bibliographic_titleLang":"en"}]}]},"item_10_description_4":{"attribute_name":"抄録","attribute_value_mlt":[{"subitem_description":"Evaluating the subjective value of events is a crucial task in the investigation of how the brain implements the value-based computations by which living systems make decisions. This task is often not straightforward, especially for animal subjects. In the present paper, we propose a novel model-based method for estimating subjective value from choice behavior. The proposed method is based on reinforcement learning (RL) theory. It draws upon the premise that a subject tends to choose the option that leads to an outcome with a high subjective value. The proposed method consists of two components: (1) a novel behavioral task in which the choice outcome is presented randomly within the same valence category and (2) the model parameter fit of RL models to the behavioral data. We investigated the validity and limitations of the proposed method by conducting several computer simulations. We also applied the proposed method to actual behavioral data from two rats that performed two tasks: one manipulating the reward amount and another manipulating the delay of reward signals. These results demonstrate that reasonable estimates can be obtained using the proposed method.","subitem_description_language":"en","subitem_description_type":"Abstract"}]},"item_10_identifier_60":{"attribute_name":"URI","attribute_value_mlt":[{"subitem_identifier_type":"DOI","subitem_identifier_uri":"http://doi.org/10.1016/j.jmp.2017.05.005"},{"subitem_identifier_type":"HDL","subitem_identifier_uri":"http://hdl.handle.net/2237/27094"}]},"item_10_publisher_32":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"Elsevier","subitem_publisher_language":"en"}]},"item_10_relation_11":{"attribute_name":"DOI","attribute_value_mlt":[{"subitem_relation_type":"isVersionOf","subitem_relation_type_id":{"subitem_relation_type_id_text":"https://doi.org/10.1016/j.jmp.2017.05.005","subitem_relation_type_select":"DOI"}}]},"item_10_rights_12":{"attribute_name":"権利","attribute_value_mlt":[{"subitem_rights":"© 2017. This manuscript version is made available under the CC-BY-NC-ND 4.0 license http://creativecommons.org/licenses/by-nc-nd/4.0/","subitem_rights_language":"en"}]},"item_10_select_15":{"attribute_name":"著者版フラグ","attribute_value_mlt":[{"subitem_select_item":"author"}]},"item_10_source_id_7":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"0022-2496","subitem_source_identifier_type":"PISSN"}]},"item_1615787544753":{"attribute_name":"出版タイプ","attribute_value_mlt":[{"subitem_version_resource":"http://purl.org/coar/version/c_ab4af688f83e57aa","subitem_version_type":"AM"}]},"item_access_right":{"attribute_name":"アクセス権","attribute_value_mlt":[{"subitem_access_right":"open access","subitem_access_right_uri":"http://purl.org/coar/access_right/c_abf2"}]},"item_creator":{"attribute_name":"著者","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Katahira, Kentaro","creatorNameLang":"en"}],"nameIdentifiers":[{"nameIdentifier":"74032","nameIdentifierScheme":"WEKO"}]},{"creatorNames":[{"creatorName":"Yuki, Shoko","creatorNameLang":"en"}],"nameIdentifiers":[{"nameIdentifier":"74033","nameIdentifierScheme":"WEKO"}]},{"creatorNames":[{"creatorName":"Okanoya, Kazuo","creatorNameLang":"en"}],"nameIdentifiers":[{"nameIdentifier":"74034","nameIdentifierScheme":"WEKO"}]}]},"item_files":{"attribute_name":"ファイル情報","attribute_type":"file","attribute_value_mlt":[{"accessrole":"open_date","date":[{"dateType":"Available","dateValue":"2019-08-01"}],"displaytype":"detail","filename":"Katahira_JMP2017.pdf","filesize":[{"value":"1.7 MB"}],"format":"application/pdf","licensetype":"license_note","mimetype":"application/pdf","url":{"label":"Katahira_JMP2017.pdf ファイル公開日:2019/08/01","objectType":"fulltext","url":"https://nagoya.repo.nii.ac.jp/record/24885/files/Katahira_JMP2017.pdf"},"version_id":"b95c48cd-f7b6-47a1-afe3-26749cf7fef2"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"Subjective value","subitem_subject_scheme":"Other"},{"subitem_subject":"Model-based estimation","subitem_subject_scheme":"Other"},{"subitem_subject":"Reinforcement learning","subitem_subject_scheme":"Other"},{"subitem_subject":"Choice behavior","subitem_subject_scheme":"Other"},{"subitem_subject":"Random feedback","subitem_subject_scheme":"Other"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourcetype":"journal article","resourceuri":"http://purl.org/coar/resource_type/c_6501"}]},"item_title":"Model-based estimation of subjective values using choice tasks with probabilistic feedback","item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Model-based estimation of subjective values using choice tasks with probabilistic feedback","subitem_title_language":"en"}]},"item_type_id":"10","owner":"1","path":["314"],"pubdate":{"attribute_name":"PubDate","attribute_value":"2017-11-14"},"publish_date":"2017-11-14","publish_status":"0","recid":"24885","relation_version_is_last":true,"title":["Model-based estimation of subjective values using choice tasks with probabilistic feedback"],"weko_creator_id":"1","weko_shared_id":-1},"updated":"2023-01-16T04:15:25.693361+00:00"}