@article{caro_2021, 
  title={Undecidability of Learnability}, 
  abstractNote={Machine learning researchers and practitioners steadily enlarge the multitude
of successful learning models. They achieve this through in-depth theoretical
analyses and experiential heuristics. However, there is no known
general-purpose procedure for rigorously evaluating whether newly proposed
models indeed successfully learn from data. We show that such a procedure
cannot exist. For PAC binary classification, uniform and universal online
learning, and exact learning through teacher-learner interactions, learnability
is in general undecidable, both in the sense of independence of the axioms in a
formal system and in the sense of uncomputability. Our proofs proceed via
computable constructions of function classes that encode the consistency
problem for formal systems and the halting problem for Turing machines into
complexity measures that characterize learnability. Our work shows that
undecidability appears in the theoretical foundations of machine learning:
There is no one-size-fits-all algorithm for deciding whether a machine learning
model can be successful. We cannot in general automatize the process of
assessing new learning models.}, 
  author={Caro, Matthias C.}, 
  year={2021}, 
  month={Aug}
}