
added preprint link to brockbank paper
tobiasgerstenberg committed Jun 11, 2024
1 parent ba7e895 · commit 9816d43
Showing 10 changed files with 29 additions and 21 deletions.
2 changes: 1 addition & 1 deletion content/publication/brockbank2024monster.md
@@ -17,7 +17,7 @@ image_preview = ""
selected = false
projects = []
url_pdf = "papers/brockbank2024monster.pdf"
-url_preprint = ""
+url_preprint = "https://osf.io/preprints/psyarxiv/3wzbk"
url_code = ""
url_dataset = ""
url_slides = ""
2 changes: 1 addition & 1 deletion content/publication/gerstenberg2024counterfactual.md
@@ -14,7 +14,7 @@ publication_short = "_Trends in Cognitive Sciences_"
publication = "Gerstenberg T. (2024). Counterfactual simulation in causal cognition. _Trends in Cognitive Sciences_."
abstract = "How do people make causal judgments and assign responsibility? In this paper, I show that counterfactual simulations are key. To simulate counterfactuals, we need three ingredients: a generative mental model of the world, the ability to perform counterfactual interventions on that model, and the capacity to simulate the consequences of these interventions. The counterfactual simulation model (CSM) uses these ingredients to capture people's intuitive understanding of the physical and social world. In the physical domain, the CSM predicts people's causal judgments about dynamic collision events, complex situations that involve multiple causes, omissions as causes, and causes that sustain physical stability. In the social domain, the CSM predicts responsibility judgments in helping and hindering scenarios."
image_preview = ""
-selected = false
+selected = true
projects = []
url_pdf = "papers/gerstenberg2024counterfactual.pdf"
url_preprint = "https://osf.io/preprints/psyarxiv/72scr"
2 changes: 1 addition & 1 deletion content/publication/zhou2023jenga.md
@@ -15,7 +15,7 @@ publication_short = "_Journal of Experimental Psychology: General_"
publication = "Zhou L., Smith K. A., Tenenbaum J. B., Gerstenberg T. (2023). Mental Jenga: A counterfactual simulation model of causal judgments about physical support. _Journal of Experimental Psychology: General_"
abstract = "From building towers to picking an orange from a stack of fruit, assessing support is critical for successfully interacting with the physical world. But how do people determine whether one object supports another? In this paper, we develop the Counterfactual Simulation Model (CSM) of causal judgments about physical support. The CSM predicts that people judge physical support by mentally simulating what would happen to a scene if the object of interest were removed. Three experiments test the model by asking one group of participants to judge what would happen to a tower if one of the blocks were removed, and another group of participants how responsible that block was for the tower's stability. The CSM accurately captures participants' predictions by running noisy simulations that incorporate different sources of uncertainty. Participants' responsibility judgments are closely related to counterfactual predictions: a block is more responsible when many other blocks would fall if it were removed. By construing physical support as preventing from falling, the CSM provides a unified account of how causal judgments in dynamic and static physical scenes arise from the process of counterfactual simulation."
image_preview = ""
-selected = true
+selected = false
projects = []
url_pdf = "papers/zhou2023jenga.pdf"
url_preprint = "https://psyarxiv.com/4a5uh"
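
The two abstracts above describe the counterfactual simulation model (CSM): to judge whether one thing caused (or supports) another, mentally remove it and simulate what would happen. As a rough illustration only — the published model runs a full rigid-body physics engine, not the toy geometry below — here is a minimal Python sketch in which a block's responsibility for a tower's stability is the fraction of noisy counterfactual simulations in which the tower falls once that block is removed. All names and numbers (`tower_falls`, `responsibility`, the block layout, the noise level) are hypothetical.

```python
import random

def tower_falls(tower, noise_sd=0.2):
    # Toy stand-in for a physics engine: the tower "falls" if any block's
    # noisily perceived center of mass overhangs the block directly below.
    for lower, upper in zip(tower, tower[1:]):
        perceived_x = upper["x"] + random.gauss(0, noise_sd)
        if abs(perceived_x - lower["x"]) > lower["width"] / 2:
            return True
    return False

def responsibility(tower, block_index, n_samples=2000):
    # CSM-style judgment: how often does the tower fall in noisy simulations
    # of the counterfactual world in which this block is removed?
    counterfactual = tower[:block_index] + tower[block_index + 1:]
    falls = sum(tower_falls(counterfactual) for _ in range(n_samples))
    return falls / n_samples

# Hypothetical three-block tower: the middle block bridges base and top.
tower = [{"x": 0.0, "width": 1.0},   # base
         {"x": 0.4, "width": 1.0},   # middle
         {"x": 0.8, "width": 1.0}]   # top

print(responsibility(tower, block_index=1))  # ~0.93: top loses support -> high responsibility
print(responsibility(tower, block_index=2))  # much lower: remaining stack mostly stands
```

This mirrors the relationship the Mental Jenga abstract reports: a block is judged more responsible when more of the tower would fall without it.
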
6 changes: 3 additions & 3 deletions docs/bibtex/cic_papers.bib
@@ -1,7 +1,7 @@
%% This BibTeX bibliography file was created using BibDesk.
%% https://bibdesk.sourceforge.io/
-%% Created for Tobias Gerstenberg at 2024-06-07 16:50:56 -0700
+%% Created for Tobias Gerstenberg at 2024-06-07 21:28:33 -0700
%% Saved with string encoding Unicode (UTF-8)
@@ -40,10 +40,10 @@ @inproceedings{tsirtsis2024sequential

@inproceedings{keshmirian2024chain,
abstract = {Causal reasoning is important for humans and artificial intelligence (AI). Causal Bayesian Networks (CBNs) model causal relationships using directed links between nodes in a network. Deviations from their edicts result in biased judgments. This study explores one such bias by examining two structures in CBNs: canonical Chain (A→C→B) and Common Cause (A←C→B) networks. In these structures, if C is known, the probability of the outcome (B) is normatively independent of the initial cause (A). But humans often ignore this independence. We tested mutually exclusive predictions of three theories that could account for this bias (N=300). Our results show that humans perceive causes in Chain structures as significantly stronger, supporting only one of the hypotheses. The increased perceived causal power might reflect a view of intermediate causes as more reflective of reliable mechanisms. The bias may stem either from our interventions in the world or how we talk about causality with others. LLMs are primarily trained on language data. Therefore, examining whether they exhibit similar biases can determine the extent to which language is the vehicle of such causal biases, with implications for whether LLMs can abstract causal principles. We therefore, subjected three LLMs, GPT3.5-Turbo, GPT4, and Luminous Supreme Control, to the same queries as our human subjects, adjusting a key `temperature' hyperparameter. We show that at greater randomness levels, LLMs exhibit a similar bias, suggesting it is supported by language use. The absence of item effects suggests a degree of causal principle abstraction in LLMs.},
-author = {Keshmirian, Anita and Keshmirian, Anita and Willig, Moritz and Hemmatian, Babak and Hahn, Ulrike and Kersting, Kristian and Gerstenberg, Tobias},
+author = {Keshmirian, Anita and Willig, Moritz and Hemmatian, Babak and Hahn, Ulrike and Kersting, Kristian and Gerstenberg, Tobias},
booktitle = {{Proceedings of the 46th Annual Conference of the Cognitive Science Society}},
date-added = {2024-05-12 17:47:58 +0200},
-date-modified = {2024-05-12 17:47:58 +0200},
+date-modified = {2024-06-07 21:28:31 -0700},
editor = {Larissa K Samuelson and Stefan Frank and Mariya Toneva and Allyson Mackey and Eliot Hazeltine},
title = {{Chain versus common cause: Biased causal strength judgments in humans and large language models}},
year = {2024}}
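
The normative claim in the keshmirian2024chain abstract — that once C is known, the outcome B is independent of the initial cause A — can be checked by brute-force enumeration. Below is a minimal Python sketch for the chain A→C→B; the conditional probability tables are made up, and any values would give the same result. The common-cause network A←C→B factorizes as P(C)P(A|C)P(B|C) and yields the identical independence, which is why the two structures are normatively equivalent in this respect.

```python
from itertools import product

# Hypothetical CPTs for binary variables A, C, B in the chain A -> C -> B.
p_a = 0.3                        # P(A=1)
p_c_given_a = {0: 0.2, 1: 0.9}   # P(C=1 | A=a)
p_b_given_c = {0: 0.1, 1: 0.7}   # P(B=1 | C=c)

def chain_joint(a, c, b):
    # Joint probability P(A=a, C=c, B=b) under the chain factorization.
    pa = p_a if a else 1 - p_a
    pc = p_c_given_a[a] if c else 1 - p_c_given_a[a]
    pb = p_b_given_c[c] if b else 1 - p_b_given_c[c]
    return pa * pc * pb

def p_b1_given(a=None, c=None):
    # P(B=1 | evidence) by summing the joint over all consistent worlds.
    num = den = 0.0
    for av, cv, bv in product((0, 1), repeat=3):
        if (a is not None and av != a) or (c is not None and cv != c):
            continue
        p = chain_joint(av, cv, bv)
        den += p
        num += p * bv
    return num / den

# Once C is observed, A carries no further information about B:
print(p_b1_given(c=1))        # 0.7
print(p_b1_given(a=0, c=1))   # 0.7 -- identical
print(p_b1_given(a=1, c=1))   # 0.7 -- identical
```

The bias the paper reports is that human judgments (and LLM judgments at higher sampling temperatures) nonetheless treat causes in the chain structure as stronger.
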
Binary file removed docs/img/members/helena_vasconcelos.png
Binary file not shown.
20 changes: 8 additions & 12 deletions docs/index.html
@@ -1578,12 +1578,12 @@ <h1>Selected<br>publications</h1>

<div class="pub-list-item" style="margin-bottom: 1rem" itemscope itemtype="http://schema.org/CreativeWork">
<span itemprop="author">
-L. Zhou, K. A. Smith, J. B. Tenenbaum, T. Gerstenberg</span>
+T. Gerstenberg</span>

-(2023).
+(2024).

-<a href="https://cicl.stanford.edu/publication/zhou2023jenga/" itemprop="name">Mental Jenga: A counterfactual simulation model of causal judgments about physical support</a>.
-<em>Journal of Experimental Psychology: General</em>.
+<a href="https://cicl.stanford.edu/publication/gerstenberg2024counterfactual/" itemprop="name">Counterfactual simulation in causal cognition</a>.
+<em>Trends in Cognitive Sciences</em>.



@@ -1593,12 +1593,12 @@



-<a class="btn btn-outline-primary my-1 mr-1 btn-sm" href="https://psyarxiv.com/4a5uh" target="_blank" rel="noopener">
+<a class="btn btn-outline-primary my-1 mr-1 btn-sm" href="https://osf.io/preprints/psyarxiv/72scr" target="_blank" rel="noopener">
Preprint
</a>


-<a class="btn btn-outline-primary my-1 mr-1 btn-sm" href="https://cicl.stanford.edu/papers/zhou2023jenga.pdf" target="_blank" rel="noopener">
+<a class="btn btn-outline-primary my-1 mr-1 btn-sm" href="https://cicl.stanford.edu/papers/gerstenberg2024counterfactual.pdf" target="_blank" rel="noopener">
PDF
</a>

@@ -1615,12 +1615,8 @@



-<a class="btn btn-outline-primary my-1 mr-1 btn-sm" href="https://psycnet.apa.org/record/2023-65571-001" target="_blank" rel="noopener">
-Link
-</a>
-
-<a class="btn btn-outline-primary my-1 mr-1 btn-sm" href="https://github.com/cicl-stanford/mental_jenga" target="_blank" rel="noopener">
-Github
+<a class="btn btn-outline-primary my-1 mr-1 btn-sm" href="https://hai.stanford.edu/news/humans-use-counterfactuals-reason-about-causality-can-ai" target="_blank" rel="noopener">
+Press: HAI News
</a>


4 changes: 4 additions & 0 deletions docs/member/tobias_gerstenberg/index.html
@@ -564,6 +564,10 @@ <h2 id="publications">Publications</h2>



+<a class="btn btn-outline-primary my-1 mr-1 btn-sm" href="https://osf.io/preprints/psyarxiv/3wzbk" target="_blank" rel="noopener">
+Preprint
+</a>


<a class="btn btn-outline-primary my-1 mr-1 btn-sm" href="https://cicl.stanford.edu/papers/brockbank2024monster.pdf" target="_blank" rel="noopener">
PDF
4 changes: 4 additions & 0 deletions docs/publication/brockbank2024monster/index.html
@@ -315,6 +315,10 @@ <h3>Abstract</h3>



+<a class="btn btn-outline-primary my-1 mr-1" href="https://osf.io/preprints/psyarxiv/3wzbk" target="_blank" rel="noopener">
+Preprint
+</a>


<a class="btn btn-outline-primary my-1 mr-1" href="https://cicl.stanford.edu/papers/brockbank2024monster.pdf" target="_blank" rel="noopener">
PDF
4 changes: 4 additions & 0 deletions docs/publication/index.html
@@ -1966,6 +1966,10 @@ <h1>Publications</h1>



+<a class="btn btn-outline-primary my-1 mr-1 btn-sm" href="https://osf.io/preprints/psyarxiv/3wzbk" target="_blank" rel="noopener">
+Preprint
+</a>


<a class="btn btn-outline-primary my-1 mr-1 btn-sm" href="https://cicl.stanford.edu/papers/brockbank2024monster.pdf" target="_blank" rel="noopener">
PDF
6 changes: 3 additions & 3 deletions static/bibtex/cic_papers.bib
@@ -1,7 +1,7 @@
%% This BibTeX bibliography file was created using BibDesk.
%% https://bibdesk.sourceforge.io/
-%% Created for Tobias Gerstenberg at 2024-06-07 16:50:56 -0700
+%% Created for Tobias Gerstenberg at 2024-06-07 21:28:33 -0700
%% Saved with string encoding Unicode (UTF-8)
@@ -40,10 +40,10 @@ @inproceedings{tsirtsis2024sequential

@inproceedings{keshmirian2024chain,
abstract = {Causal reasoning is important for humans and artificial intelligence (AI). Causal Bayesian Networks (CBNs) model causal relationships using directed links between nodes in a network. Deviations from their edicts result in biased judgments. This study explores one such bias by examining two structures in CBNs: canonical Chain (A→C→B) and Common Cause (A←C→B) networks. In these structures, if C is known, the probability of the outcome (B) is normatively independent of the initial cause (A). But humans often ignore this independence. We tested mutually exclusive predictions of three theories that could account for this bias (N=300). Our results show that humans perceive causes in Chain structures as significantly stronger, supporting only one of the hypotheses. The increased perceived causal power might reflect a view of intermediate causes as more reflective of reliable mechanisms. The bias may stem either from our interventions in the world or how we talk about causality with others. LLMs are primarily trained on language data. Therefore, examining whether they exhibit similar biases can determine the extent to which language is the vehicle of such causal biases, with implications for whether LLMs can abstract causal principles. We therefore, subjected three LLMs, GPT3.5-Turbo, GPT4, and Luminous Supreme Control, to the same queries as our human subjects, adjusting a key `temperature' hyperparameter. We show that at greater randomness levels, LLMs exhibit a similar bias, suggesting it is supported by language use. The absence of item effects suggests a degree of causal principle abstraction in LLMs.},
-author = {Keshmirian, Anita and Keshmirian, Anita and Willig, Moritz and Hemmatian, Babak and Hahn, Ulrike and Kersting, Kristian and Gerstenberg, Tobias},
+author = {Keshmirian, Anita and Willig, Moritz and Hemmatian, Babak and Hahn, Ulrike and Kersting, Kristian and Gerstenberg, Tobias},
booktitle = {{Proceedings of the 46th Annual Conference of the Cognitive Science Society}},
date-added = {2024-05-12 17:47:58 +0200},
-date-modified = {2024-05-12 17:47:58 +0200},
+date-modified = {2024-06-07 21:28:31 -0700},
editor = {Larissa K Samuelson and Stefan Frank and Mariya Toneva and Allyson Mackey and Eliot Hazeltine},
title = {{Chain versus common cause: Biased causal strength judgments in humans and large language models}},
year = {2024}}
