@article{Yoon581,
  author    = {Yoon, Chang Ho and Torrance, Robert and Scheinerman, Naomi},
  title     = {Machine Learning in Medicine: Should the Pursuit of Enhanced Interpretability Be Abandoned?},
  journal   = {Journal of Medical Ethics},
  volume    = {48},
  number    = {9},
  pages     = {581--585},
  year      = {2022},
  publisher = {Institute of Medical Ethics},
  issn      = {0306-6800},
  doi       = {10.1136/medethics-2020-107102},
  url       = {https://jme.bmj.com/content/48/9/581},
  eprint    = {https://jme.bmj.com/content/48/9/581.full.pdf},
  abstract  = {We argue why interpretability should have primacy alongside empiricism for several reasons: first, if machine learning (ML) models are beginning to render some of the high-risk healthcare decisions instead of clinicians, these models pose a novel medicolegal and ethical frontier that is incompletely addressed by current methods of appraising medical interventions like pharmacological therapies; second, a number of judicial precedents underpinning medical liability and negligence are compromised when {\textquoteleft}autonomous{\textquoteright} ML recommendations are considered to be en par with human instruction in specific contexts; third, explainable algorithms may be more amenable to the ascertainment and minimisation of biases, with repercussions for racial equity as well as scientific reproducibility and generalisability. We conclude with some reasons for the ineludible importance of interpretability, such as the establishment of trust, in overcoming perhaps the most difficult challenge ML will face in a high-stakes environment like healthcare: professional and public acceptance. There are no data in this work.},
}