
Preamble

Message

We can now do better than this vague hope!

The founders of Effective Altruism took ideas from Philosophy, Economics, and other parts of academia to build a rigorous approach to ‘doing the most good in the world’, and to exploring and measuring this.

  • Miraculously, “EA happened”

Miraculously, EA also has a passionate and influential group of supporters, and a substantial pool of funds for research, interventions, and advocacy!


A big opportunity for academic researchers: impact, inspiration, and funding

  • Partnering within academia; an EA research audience

  • Grants

  • Helping students

  • Leave academia for greener (?) pastures at an EA-aligned org

EA and global priorities research offer a huge opportunity for academic researchers to have a positive impact (on the allocation of funds, and on the market of ideas).

There are opportunities for funding to support your research within academia, to promote the impact of your research (and gain valuable feedback), and to help students find meaningful careers and research.

…and/or you may want to leave academia to work directly for an EA-aligned organization (like I did).

Things I’m not going to cover:

  • “Things I’m not going to cover” (no time)

What/who is EA? What research/funding is there?

What is EA?

Doing the ‘most good’ given limited resources (some relationship to utilitarianism)…

but how do we define ‘the most good’?

Question: What do EAs prioritize most?

  1. Global Poverty
  2. Artificial Intelligence Risk
  3. Climate Change
  4. Cause prioritization
  5. Animal welfare

Results of an impromptu survey at the LIS conference: what respondents think EAs prioritize most

library(dplyr)
library(tibble)
library(knitr)
library(kableExtra)

# hijack() is a small helper from the setup code (not echoed here): it returns
# a copy of a function with some default arguments overridden. Here it
# pre-sets table-formatting options.
.kable_styling <- hijack(kableExtra::kable_styling, full_width = FALSE)
.kable <- hijack(knitr::kable, format.args = list(big.mark = ",", scientific = FALSE))
# Guesses collected in the impromptu survey at the LIS conference
lis_ea_priority_guess <- factor(c(
"Cause prioritization",
"Global Poverty",
"Cause prioritization",
"Climate Change",
"Global Poverty",
"Climate Change",
"Global Poverty",
"Global Poverty",
"Cause prioritization",
"Cause prioritization",
"Climate Change",
"Cause prioritization",
"Climate Change",
"Artificial Intelligence Risk",
"Climate Change",
"Global Poverty",
"Cause prioritization",
"Cause prioritization",
"Climate Change",
"Global Poverty",
"Cause prioritization",
"Cause prioritization",
"Cause prioritization",
"Global Poverty",
"Global Poverty",
"Global Poverty",
"Cause prioritization",
"Climate Change",
"Climate Change",
"Global Poverty",
"Global Poverty",
"Climate Change",
"Global Poverty",
"Global Poverty",
"Cause prioritization",
"Climate Change",
"Global Poverty",
"Global Poverty"
)
)

# Wrap the responses in a tibble for tabulation
lis_ea_priority_guess <- tibble(
  lis_ea_priority_guess = lis_ea_priority_guess
)

# tabg() is a tabulation helper from the author's setup (counts and shares of
# a variable); the result is rendered with the pre-configured kable helpers.
lis_ea_priority_guess %>% tabg(lis_ea_priority_guess) %>%
  .kable(caption = "What LIS respondents *think* EAs prioritize most:") %>%
  .kable_styling()
What LIS respondents think EAs prioritize most:

  lis_ea_priority_guess          n    percent
  Global Poverty                15       0.39
  Cause prioritization          12       0.32
  Climate Change                10       0.26
  Artificial Intelligence Risk   1       0.03
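
The helpers hijack() and tabg() are defined in the presentation's setup code, which is not echoed here. A minimal sketch of what such helpers could look like (an assumption for illustration, not the author's exact definitions):

# hijack(): return a copy of FUN with some default arguments overridden
hijack <- function(FUN, ...) {
  .FUN <- FUN
  args <- list(...)
  for (nm in names(args)) formals(.FUN)[[nm]] <- args[[nm]]
  .FUN
}

# A tabg()-style tabulation: counts and shares of one variable, most common
# first (column names chosen here to match the table above)
tabg <- function(df, var) {
  df %>%
    dplyr::count({{ var }}, sort = TRUE) %>%
    dplyr::mutate(percent = round(n / sum(n), 2))
}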

2020 EA survey: all responses

library(likert)    # diverging stacked bar charts for Likert-type items
library(ggplot2)

# `lik_priority_eas20` (cause-priority responses from the 2020 EA survey) and
# `title` are created in earlier chunks (not shown). Drop the numeric
# engagement column so that only the Likert-scale factors reach likert().
(
  priority_ordered_bar <- lik_priority_eas20 %>%
    as.data.frame() %>%
    select(-engagement_num) %>%
    likert() %>%
    plot(type = "bar") +
    ggtitle(title)
)
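
For readers who have not used the likert package, here is a self-contained toy example of the same plotting pattern, with entirely made-up data (illustrative only, not survey results):

library(likert)
library(ggplot2)

# Made-up ratings of three causes on a five-point priority scale
lvls <- c("Not a priority", "Low", "Medium", "High", "Top priority")
set.seed(1)
toy <- data.frame(
  `Global poverty` = factor(sample(lvls, 50, replace = TRUE), levels = lvls),
  `AI risk`        = factor(sample(lvls, 50, replace = TRUE), levels = lvls),
  `Animal welfare` = factor(sample(lvls, 50, replace = TRUE), levels = lvls),
  check.names = FALSE
)

# likert() summarises the items; plot(..., type = "bar") draws a diverging
# stacked bar chart, which is a ggplot object and so accepts ggtitle()
plot(likert(toy), type = "bar") + ggtitle("Toy likert bar chart")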