{"created":"2021-03-01T06:16:07.928523+00:00","id":9368,"links":{},"metadata":{"_buckets":{"deposit":"01afcf25-d6aa-41c6-8885-02b3f2a68a59"},"_deposit":{"id":"9368","owners":[],"pid":{"revision_id":0,"type":"depid","value":"9368"},"status":"published"},"_oai":{"id":"oai:nagoya.repo.nii.ac.jp:00009368","sets":["312:598:599"]},"author_link":["26803","26804","26805","26806"],"item_10_biblio_info_6":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicIssueDates":{"bibliographicIssueDate":"2008-03","bibliographicIssueDateType":"Issued"},"bibliographicPageEnd":"184","bibliographicPageStart":"181","bibliographic_titles":[{"bibliographic_title":"IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)","bibliographic_titleLang":"en"}]}]},"item_10_description_4":{"attribute_name":"抄録","attribute_value_mlt":[{"subitem_description":"A sound field reproduction method which uses blind source separation and head-related transfer function is proposed. In the proposed system, multichannel acoustic signals captured at the distant microphones are encoded to a set of location/signal pairs of virtual sound sources based on frequency-domain ICA. After estimating the locations and the signals of the virtual sources, by convolving the controlled acoustic transfer functions with each signal, the spatial sound at the selected point is constructed. In the evaluation, the sound field made by 6 sound sources is captured using 48 distant microphones and is encoded into set of virtual sound sources. Subjective evaluation shows that there is no significant difference between natural and reconstructed sound when more than 6 virtual sources are used. Therefore the effectiveness of the encoding algorithm as well as the virtual source representation is confirmed.","subitem_description_language":"en","subitem_description_type":"Abstract"}]},"item_10_identifier_60":{"attribute_name":"URI","attribute_value_mlt":[{"subitem_identifier_type":"HDL","subitem_identifier_uri":"http://hdl.handle.net/2237/11143"}]},"item_10_publisher_32":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"IEEE","subitem_publisher_language":"en"}]},"item_10_relation_11":{"attribute_name":"DOI","attribute_value_mlt":[{"subitem_relation_type":"isVersionOf","subitem_relation_type_id":{"subitem_relation_type_id_text":"http://doi.org/10.1109/ICASSP.2008.4517576","subitem_relation_type_select":"DOI"}}]},"item_10_relation_8":{"attribute_name":"ISBN","attribute_value_mlt":[{"subitem_relation_type":"isPartOf","subitem_relation_type_id":{"subitem_relation_type_id_text":"978-1-4244-1483-3","subitem_relation_type_select":"ISBN"}}]},"item_10_rights_12":{"attribute_name":"権利","attribute_value_mlt":[{"subitem_rights":"Copyright © 2008 IEEE. Reprinted from IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP, 2008, p.181-184. This material is posted here with permission of the IEEE. Such permission of the IEEE does not in any way imply IEEE endorsement of any of Nagoya University’s products or services. Internal or personal use of this material is permitted. However, permission to reprint/republish this material for advertising or promotional purposes or for creating new collective works for resale or redistribution must be obtained from the IEEE by writing to pubs-permissions@ieee.org.","subitem_rights_language":"en"}]},"item_10_select_15":{"attribute_name":"著者版フラグ","attribute_value_mlt":[{"subitem_select_item":"publisher"}]},"item_10_source_id_7":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"1520-6149","subitem_source_identifier_type":"PISSN"}]},"item_10_text_14":{"attribute_name":"フォーマット","attribute_value_mlt":[{"subitem_text_value":"application/pdf"}]},"item_1615787544753":{"attribute_name":"出版タイプ","attribute_value_mlt":[{"subitem_version_resource":"http://purl.org/coar/version/c_970fb48d4fbd8a85","subitem_version_type":"VoR"}]},"item_access_right":{"attribute_name":"アクセス権","attribute_value_mlt":[{"subitem_access_right":"open access","subitem_access_right_uri":"http://purl.org/coar/access_right/c_abf2"}]},"item_creator":{"attribute_name":"著者","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"NIWA, Kenta","creatorNameLang":"en"}],"nameIdentifiers":[{"nameIdentifier":"26803","nameIdentifierScheme":"WEKO"}]},{"creatorNames":[{"creatorName":"NISHINO, Takanori","creatorNameLang":"en"}],"nameIdentifiers":[{"nameIdentifier":"26804","nameIdentifierScheme":"WEKO"}]},{"creatorNames":[{"creatorName":"西野, 隆典","creatorNameLang":"ja"}],"nameIdentifiers":[{"nameIdentifier":"26805","nameIdentifierScheme":"WEKO"}]},{"creatorNames":[{"creatorName":"TAKEDA, Kazuya","creatorNameLang":"en"}],"nameIdentifiers":[{"nameIdentifier":"26806","nameIdentifierScheme":"WEKO"}]}]},"item_files":{"attribute_name":"ファイル情報","attribute_type":"file","attribute_value_mlt":[{"accessrole":"open_date","date":[{"dateType":"Available","dateValue":"2018-02-20"}],"displaytype":"detail","filename":"0000181-1.pdf","filesize":[{"value":"325.1 kB"}],"format":"application/pdf","licensetype":"license_note","mimetype":"application/pdf","url":{"label":"0000181-1.pdf","objectType":"fulltext","url":"https://nagoya.repo.nii.ac.jp/record/9368/files/0000181-1.pdf"},"version_id":"08dfd80a-3714-4720-ab24-f55e8c662377"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"Acoustic arrays","subitem_subject_scheme":"Other"},{"subitem_subject":"Acoustic fields","subitem_subject_scheme":"Other"},{"subitem_subject":"Acoustic beam steering","subitem_subject_scheme":"Other"},{"subitem_subject":"Array signal processing","subitem_subject_scheme":"Other"},{"subitem_subject":"Spatial filters","subitem_subject_scheme":"Other"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourcetype":"journal article","resourceuri":"http://purl.org/coar/resource_type/c_6501"}]},"item_title":"Encoding large array signals into a 3D sound field representation for selective listening point audio based on blind source separation","item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Encoding large array signals into a 3D sound field representation for selective listening point audio based on blind source separation","subitem_title_language":"en"}]},"item_type_id":"10","owner":"1","path":["599"],"pubdate":{"attribute_name":"PubDate","attribute_value":"2009-02-23"},"publish_date":"2009-02-23","publish_status":"0","recid":"9368","relation_version_is_last":true,"title":["Encoding large array signals into a 3D sound field representation for selective listening point audio based on blind source separation"],"weko_creator_id":"1","weko_shared_id":-1},"updated":"2023-01-16T04:33:49.093865+00:00"}